From 28c8aa7270a04adea69065477b13cfc0dd244acc Mon Sep 17 00:00:00 2001
From: Ben Howard
Date: Wed, 14 Jan 2015 12:24:09 -0700
Subject: Drop reliance on dmidecode executable.

---
 cloudinit/sources/DataSourceAltCloud.py   | 27 ++++++++-------------------
 cloudinit/sources/DataSourceCloudSigma.py | 22 ++++++++++------------
 cloudinit/sources/DataSourceSmartOS.py    | 25 ++++++------------------
 3 files changed, 24 insertions(+), 50 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 1e913a6e..1b0f72a1 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -40,7 +40,6 @@ LOG = logging.getLogger(__name__)
 CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
 
 # Shell command lists
-CMD_DMI_SYSTEM = ['/usr/sbin/dmidecode', '--string', 'system-product-name']
 CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
 CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
 
@@ -100,11 +99,7 @@ class DataSourceAltCloud(sources.DataSource):
         '''
         Description:
             Get the type for the cloud back end this instance is running on
-            by examining the string returned by:
-            dmidecode --string system-product-name
-
-            On VMWare/vSphere dmidecode returns: RHEV Hypervisor
-            On VMWare/vSphere dmidecode returns: VMware Virtual Platform
+            by examining the string returned by reading the dmi data.
 
         Input:
             None
@@ -117,26 +112,20 @@ class DataSourceAltCloud(sources.DataSource):
 
         uname_arch = os.uname()[4]
         if uname_arch.startswith("arm") or uname_arch == "aarch64":
-            # Disabling because dmidecode in CMD_DMI_SYSTEM crashes kvm process
+            # Disabling because dmi data is not available on ARM processors
             LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)")
             return 'UNKNOWN'
 
-        cmd = CMD_DMI_SYSTEM
-        try:
-            (cmd_out, _err) = util.subp(cmd)
-        except ProcessExecutionError, _err:
-            LOG.debug(('Failed command: %s\n%s') % \
-                (' '.join(cmd), _err.message))
-            return 'UNKNOWN'
-        except OSError, _err:
-            LOG.debug(('Failed command: %s\n%s') % \
-                (' '.join(cmd), _err.message))
+        system_name = util.read_dmi_data("system-product-name")
+        if not system_name:
             return 'UNKNOWN'
 
-        if cmd_out.upper().startswith('RHEV'):
+        sys_name = system_name.upper()
+
+        if sys_name.startswith('RHEV'):
             return 'RHEV'
 
-        if cmd_out.upper().startswith('VMWARE'):
+        if sys_name.startswith('VMWARE'):
             return 'VSPHERE'
 
         return 'UNKNOWN'
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 707cd0ce..76597116 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -44,27 +44,25 @@ class DataSourceCloudSigma(sources.DataSource):
 
     def is_running_in_cloudsigma(self):
         """
-        Uses dmidecode to detect if this instance of cloud-init is running
+        Uses dmi data to detect if this instance of cloud-init is running
         in the CloudSigma's infrastructure.
         """
         uname_arch = os.uname()[4]
         if uname_arch.startswith("arm") or uname_arch == "aarch64":
-            # Disabling because dmidecode in CMD_DMI_SYSTEM crashes kvm process
+            # Disabling because dmi data is not available on ARM processors
            LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
            return False

-        dmidecode_path = util.which('dmidecode')
-        if not dmidecode_path:
+        LOG.debug("determining hypervisor product name via dmi data")
+        sys_product_name = util.read_dmi_data("system-product-name")
+        if not sys_product_name:
+            LOG.warn("failed to get hypervisor product name via dmi data")
             return False
+        else:
+            LOG.debug("detected hypervisor as {}".format(sys_product_name))
+            return 'cloudsigma' in sys_product_name.lower()
 
-        LOG.debug("Determining hypervisor product name via dmidecode")
-        try:
-            cmd = [dmidecode_path, "--string", "system-product-name"]
-            system_product_name, _ = util.subp(cmd)
-            return 'cloudsigma' in system_product_name.lower()
-        except:
-            LOG.warn("Failed to get hypervisor product name via dmidecode")
-
+        LOG.warn("failed to query dmi data for system product name")
         return False
 
     def get_data(self):
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 2733a2f6..86b8775a 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -358,26 +358,13 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
 
 def dmi_data():
-    sys_uuid, sys_type = None, None
-    dmidecode_path = util.which('dmidecode')
-    if not dmidecode_path:
-        return False
+    sys_uuid = util.read_dmi_data("system-uuid")
+    sys_type = util.read_dmi_data("system-product-name")
+
+    if not sys_uuid or not sys_type:
+        return None
 
-    sys_uuid_cmd = [dmidecode_path, "-s", "system-uuid"]
-    try:
-        LOG.debug("Getting hostname from dmidecode")
-        (sys_uuid, _err) = util.subp(sys_uuid_cmd)
-    except Exception as e:
-        util.logexc(LOG, "Failed to get system UUID", e)
-
-    sys_type_cmd = [dmidecode_path, "-s", "system-product-name"]
-    try:
-        LOG.debug("Determining hypervisor product name via dmidecode")
-        (sys_type, _err) = util.subp(sys_type_cmd)
-    except Exception as e:
-        util.logexc(LOG, "Failed to get system UUID", e)
-
-    return (sys_uuid.lower().strip(), sys_type.strip())
+    return (sys_uuid.lower(), sys_type)
 
 
 def write_boot_content(content, content_f, link=None, shebang=False,
--
cgit v1.2.3

From 063d33bf8bb277744abab2c1fff44af665dc2545 Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Tue, 20 Jan 2015 15:59:35 +0000
Subject: New Azure disk_setup default.

---
 cloudinit/sources/DataSourceAzure.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 09bc196d..2ba1e2ad 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -53,9 +53,9 @@ BUILTIN_DS_CONFIG = {
 
 BUILTIN_CLOUD_CONFIG = {
     'disk_setup': {
-        'ephemeral0': {'table_type': 'mbr',
-                       'layout': True,
-                       'overwrite': False},
+        'ephemeral0': {'table_type': 'gpt',
+                       'layout': [100],
+                       'overwrite': True},
     },
     'fs_setup': [{'filesystem': 'ext4',
                   'device': 'ephemeral0.1',
--
cgit v1.2.3

From f895cb12141281702b34da18f2384deb64c881e7 Mon Sep 17 00:00:00 2001
From: Barry Warsaw
Date: Wed, 21 Jan 2015 17:56:53 -0500
Subject: Largely merge lp:~harlowja/cloud-init/py2-3 albeit manually because
 it seemed to be behind trunk.

`tox -e py27` passes full test suite.  Now to work on replacing mocker.
---
 cloudinit/config/cc_apt_configure.py               |   2 +-
 cloudinit/config/cc_debug.py                       |   7 +-
 cloudinit/config/cc_landscape.py                   |   2 +-
 cloudinit/config/cc_mcollective.py                 |  15 +--
 cloudinit/config/cc_phone_home.py                  |   4 +-
 cloudinit/config/cc_puppet.py                      |   8 +-
 cloudinit/config/cc_resolv_conf.py                 |   4 +-
 cloudinit/config/cc_seed_random.py                 |   3 +-
 cloudinit/config/cc_ssh.py                         |  16 +--
 cloudinit/config/cc_yum_add_repo.py                |   7 +-
 cloudinit/distros/__init__.py                      |  55 ++++++-----
 cloudinit/distros/arch.py                          |   2 +-
 cloudinit/distros/freebsd.py                       |  12 ++-
 cloudinit/distros/net_util.py                      |   2 +-
 cloudinit/distros/parsers/hostname.py              |   2 +-
 cloudinit/distros/parsers/hosts.py                 |   2 +-
 cloudinit/distros/parsers/resolv_conf.py           |   2 +-
 cloudinit/distros/parsers/sys_conf.py              |   5 +-
 cloudinit/distros/rhel.py                          |   2 +-
 cloudinit/distros/sles.py                          |   2 +-
 cloudinit/ec2_utils.py                             |   9 +-
 cloudinit/handlers/__init__.py                     |   2 +-
 cloudinit/handlers/boot_hook.py                    |   2 +-
 cloudinit/handlers/cloud_config.py                 |   2 +-
 cloudinit/handlers/shell_script.py                 |   2 +-
 cloudinit/handlers/upstart_job.py                  |   2 +-
 cloudinit/helpers.py                               |  13 +--
 cloudinit/log.py                                   |   7 +-
 cloudinit/mergers/__init__.py                      |   4 +-
 cloudinit/mergers/m_dict.py                        |   4 +-
 cloudinit/mergers/m_list.py                        |   6 +-
 cloudinit/mergers/m_str.py                         |  10 +-
 cloudinit/netinfo.py                               |   4 +-
 cloudinit/signal_handler.py                        |   2 +-
 cloudinit/sources/DataSourceConfigDrive.py         |   4 +-
 cloudinit/sources/DataSourceDigitalOcean.py        |   9 +-
 cloudinit/sources/DataSourceEc2.py                 |   4 +-
 cloudinit/sources/DataSourceMAAS.py                |   2 +-
 cloudinit/sources/DataSourceOVF.py                 |   6 +-
 cloudinit/sources/DataSourceSmartOS.py             |  15 +-
 cloudinit/sources/__init__.py                      |  10 +-
 cloudinit/sources/helpers/openstack.py             |  10 +-
 cloudinit/ssh_util.py                              |   6 +-
 cloudinit/stages.py                                |  23 ++---
 cloudinit/type_utils.py                            |  32 ++++--
 cloudinit/url_helper.py                            |  22 +++--
 cloudinit/user_data.py                             |   8 +-
 cloudinit/util.py                                  | 109 +++++++++++++--------
 packages/bddeb                                     |   1 +
 packages/brpm                                      |   2 +
 tests/unittests/test_data.py                       |  12 +--
 tests/unittests/test_datasource/test_nocloud.py    |   2 +-
 tests/unittests/test_datasource/test_openstack.py  |   7 +-
 tests/unittests/test_distros/test_netconfig.py     |   4 +-
 .../test_handler/test_handler_apt_configure.py     |  10 +-
 .../unittests/test_handler/test_handler_locale.py  |   6 +-
 .../test_handler/test_handler_seed_random.py       |   2 +-
 .../test_handler/test_handler_set_hostname.py      |   6 +-
 .../test_handler/test_handler_timezone.py          |   6 +-
 .../test_handler/test_handler_yum_add_repo.py      |   7 +-
 60 files changed, 315 insertions(+), 233 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index f10b76a3..de72903f 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -126,7 +126,7 @@ def mirror2lists_fileprefix(mirror):
 
 def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"):
-    for (name, omirror) in old_mirrors.iteritems():
+    for (name, omirror) in old_mirrors.items():
         nmirror = new_mirrors.get(name)
         if not nmirror:
             continue
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index 8c489426..bdc32fe6 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -34,7 +34,8 @@ It can be configured with the following option structure::
 """
 
 import copy
-from StringIO import StringIO
+
+from six import StringIO
 
 from cloudinit import type_utils
 from cloudinit import util
@@ -77,7 +78,7 @@ def handle(name, cfg, cloud, log, args):
     dump_cfg = copy.deepcopy(cfg)
     for k in SKIP_KEYS:
         dump_cfg.pop(k, None)
-    all_keys = list(dump_cfg.keys())
+    all_keys = list(dump_cfg)
     for k in all_keys:
         if k.startswith("_"):
             dump_cfg.pop(k, None)
@@ -103,6 +104,6 @@ def handle(name, cfg, cloud, log, args):
         line = "ci-info: %s\n" % (line)
         content_to_file.append(line)
     if out_file:
-        util.write_file(out_file, "".join(content_to_file), 0644, "w")
+        util.write_file(out_file, "".join(content_to_file), 0o644, "w")
     else:
         util.multi_log("".join(content_to_file), console=True, stderr=False)
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 8a709677..0b9d846e 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -20,7 +20,7 @@
 
 import os
 
-from StringIO import StringIO
+from six import StringIO
 
 from configobj import ConfigObj
 
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index b670390d..425420ae 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -19,7 +19,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+import six
+from six import StringIO
 
 # Used since this can maintain comments
 # and doesn't need a top level section
@@ -51,17 +52,17 @@ def handle(name, cfg, cloud, log, _args):
         # original file in order to be able to mix the rest up
         mcollective_config = ConfigObj(SERVER_CFG)
         # See: http://tiny.cc/jh9agw
-        for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems():
+        for (cfg_name, cfg) in mcollective_cfg['conf'].items():
             if cfg_name == 'public-cert':
-                util.write_file(PUBCERT_FILE, cfg, mode=0644)
+                util.write_file(PUBCERT_FILE, cfg, mode=0o644)
                 mcollective_config['plugin.ssl_server_public'] = PUBCERT_FILE
                 mcollective_config['securityprovider'] = 'ssl'
             elif cfg_name == 'private-cert':
-                util.write_file(PRICERT_FILE, cfg, mode=0600)
+                util.write_file(PRICERT_FILE, cfg, mode=0o600)
                 mcollective_config['plugin.ssl_server_private'] = PRICERT_FILE
                 mcollective_config['securityprovider'] = 'ssl'
             else:
-                if isinstance(cfg, (basestring, str)):
+                if isinstance(cfg, six.string_types):
                     # Just set it in the 'main' section
                     mcollective_config[cfg_name] = cfg
                 elif isinstance(cfg, (dict)):
@@ -69,7 +70,7 @@ def handle(name, cfg, cloud, log, _args):
                     # if it is needed and then add/or create items as needed
                     if cfg_name not in mcollective_config.sections:
                         mcollective_config[cfg_name] = {}
-                    for (o, v) in cfg.iteritems():
+                    for (o, v) in cfg.items():
                         mcollective_config[cfg_name][o] = v
                 else:
                     # Otherwise just try to convert it to a string
@@ -81,7 +82,7 @@ def handle(name, cfg, cloud, log, _args):
     contents = StringIO()
     mcollective_config.write(contents)
     contents = contents.getvalue()
-    util.write_file(SERVER_CFG, contents, mode=0644)
+    util.write_file(SERVER_CFG, contents, mode=0o644)
 
     # Start mcollective
     util.subp(['service', 'mcollective', 'start'], capture=False)
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 5bc68b83..18a7ddad 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -81,7 +81,7 @@ def handle(name, cfg, cloud, log, args):
         'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
     }
 
-    for (n, path) in pubkeys.iteritems():
+    for (n, path) in pubkeys.items():
         try:
             all_keys[n] = util.load_file(path)
         except:
@@ -99,7 +99,7 @@ def handle(name, cfg, cloud, log, args):
 
     # Get them read to be posted
     real_submit_keys = {}
-    for (k, v) in submit_keys.iteritems():
+    for (k, v) in submit_keys.items():
         if v is None:
             real_submit_keys[k] = 'N/A'
         else:
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 471a1a8a..6f1b3c57 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -18,7 +18,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+from six import StringIO
 
 import os
 import socket
@@ -81,13 +81,13 @@ def handle(name, cfg, cloud, log, _args):
             cleaned_contents = '\n'.join(cleaned_lines)
             puppet_config.readfp(StringIO(cleaned_contents),
                                  filename=PUPPET_CONF_PATH)
-            for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
+            for (cfg_name, cfg) in puppet_cfg['conf'].items():
                 # Cert configuration is a special case
                 # Dump the puppet master ca certificate in the correct place
                 if cfg_name == 'ca_cert':
                     # Puppet ssl sub-directory isn't created yet
                     # Create it with the proper permissions and ownership
-                    util.ensure_dir(PUPPET_SSL_DIR, 0771)
+                    util.ensure_dir(PUPPET_SSL_DIR, 0o771)
                     util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
                     util.ensure_dir(PUPPET_SSL_CERT_DIR)
                     util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
@@ -96,7 +96,7 @@ def handle(name, cfg, cloud, log, _args):
                 else:
                     # Iterate throug the config items, we'll use ConfigParser.set
                     # to overwrite or create new items as needed
-                    for (o, v) in cfg.iteritems():
+                    for (o, v) in cfg.items():
                         if o == 'certname':
                             # Expand %f as the fqdn
                             # TODO(harlowja) should this use the cloud fqdn??
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index bbaa6c63..71d9e3a7 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -66,8 +66,8 @@ def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
     false_flags = []
 
     if 'options' in params:
-        for key, val in params['options'].iteritems():
-            if type(val) == bool:
+        for key, val in params['options'].items():
+            if isinstance(val, bool):
                 if val:
                     flags.append(key)
                 else:
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 49a6b3e8..3b7235bf 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -21,7 +21,8 @@
 
 import base64
 import os
-from StringIO import StringIO
+
+from six import StringIO
 
 from cloudinit.settings import PER_INSTANCE
 from cloudinit import log as logging
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 4c76581c..ab6940fa 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -34,12 +34,12 @@ DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding,"
 "rather than the user \\\"root\\\".\';echo;sleep 10\"")
 
 KEY_2_FILE = {
-    "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0600),
-    "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
-    "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0600),
-    "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0644),
-    "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0600),
-    "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644),
+    "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0o600),
+    "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0o644),
+    "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0o600),
+    "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0o644),
+    "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0o600),
+    "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0o644),
 }
 
 PRIV_2_PUB = {
@@ -68,13 +68,13 @@ def handle(_name, cfg, cloud, log, _args):
 
     if "ssh_keys" in cfg:
         # if there are keys in cloud-config, use them
-        for (key, val) in cfg["ssh_keys"].iteritems():
+        for (key, val) in cfg["ssh_keys"].items():
             if key in KEY_2_FILE:
                 tgt_fn = KEY_2_FILE[key][0]
                 tgt_perms = KEY_2_FILE[key][1]
                 util.write_file(tgt_fn, val, tgt_perms)
 
-        for (priv, pub) in PRIV_2_PUB.iteritems():
+        for (priv, pub) in PRIV_2_PUB.items():
             if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                 continue
             pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 0d836f28..3b821af9 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -18,9 +18,10 @@
 
 import os
 
-from cloudinit import util
-
 import configobj
+import six
+
+from cloudinit import util
 
 
 def _canonicalize_id(repo_id):
@@ -37,7 +38,7 @@ def _format_repo_value(val):
         # Can handle 'lists' in certain cases
         # See: http://bit.ly/Qqrf1t
         return "\n    ".join([_format_repo_value(v) for v in val])
-    if not isinstance(val, (basestring, str)):
+    if not isinstance(val, six.string_types):
         return str(val)
     return val
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 49a0b652..4ebccdda 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -21,7 +21,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+import six
+from six import StringIO
 
 import abc
 import itertools
@@ -334,7 +335,7 @@ class Distro(object):
         redact_opts = ['passwd']
 
         # Check the values and create the command
-        for key, val in kwargs.iteritems():
+        for key, val in kwargs.items():
 
             if key in adduser_opts and val and isinstance(val, str):
                 adduser_cmd.extend([adduser_opts[key], val])
@@ -393,7 +394,7 @@ class Distro(object):
         if 'ssh_authorized_keys' in kwargs:
             # Try to handle this in a smart manner.
             keys = kwargs['ssh_authorized_keys']
-            if isinstance(keys, (basestring, str)):
+            if isinstance(keys, six.string_types):
                 keys = [keys]
             if isinstance(keys, dict):
                 keys = list(keys.values())
@@ -491,7 +492,7 @@ class Distro(object):
         if isinstance(rules, (list, tuple)):
             for rule in rules:
                 lines.append("%s %s" % (user, rule))
-        elif isinstance(rules, (basestring, str)):
+        elif isinstance(rules, six.string_types):
             lines.append("%s %s" % (user, rules))
         else:
             msg = "Can not create sudoers rule addition with type %r"
@@ -561,10 +562,10 @@ def _get_package_mirror_info(mirror_info, availability_zone=None,
             subst['ec2_region'] = "%s" % availability_zone[0:-1]
 
     results = {}
-    for (name, mirror) in mirror_info.get('failsafe', {}).iteritems():
+    for (name, mirror) in mirror_info.get('failsafe', {}).items():
         results[name] = mirror
 
-    for (name, searchlist) in mirror_info.get('search', {}).iteritems():
+    for (name, searchlist) in mirror_info.get('search', {}).items():
         mirrors = []
         for tmpl in searchlist:
             try:
@@ -604,30 +605,30 @@ def _get_arch_package_mirror_info(package_mirrors, arch):
 # is the standard form used in the rest
 # of cloud-init
 def _normalize_groups(grp_cfg):
-    if isinstance(grp_cfg, (str, basestring)):
+    if isinstance(grp_cfg, six.string_types):
         grp_cfg = grp_cfg.strip().split(",")
-    if isinstance(grp_cfg, (list)):
+    if isinstance(grp_cfg, list):
         c_grp_cfg = {}
         for i in grp_cfg:
-            if isinstance(i, (dict)):
+            if isinstance(i, dict):
                 for k, v in i.items():
                     if k not in c_grp_cfg:
-                        if isinstance(v, (list)):
+                        if isinstance(v, list):
                             c_grp_cfg[k] = list(v)
-                        elif isinstance(v, (basestring, str)):
+                        elif isinstance(v, six.string_types):
                             c_grp_cfg[k] = [v]
                         else:
                             raise TypeError("Bad group member type %s" %
                                             type_utils.obj_name(v))
                     else:
-                        if isinstance(v, (list)):
+                        if isinstance(v, list):
                             c_grp_cfg[k].extend(v)
-                        elif isinstance(v, (basestring, str)):
+                        elif isinstance(v, six.string_types):
                             c_grp_cfg[k].append(v)
                         else:
                             raise TypeError("Bad group member type %s" %
                                             type_utils.obj_name(v))
-            elif isinstance(i, (str, basestring)):
+            elif isinstance(i, six.string_types):
                 if i not in c_grp_cfg:
                     c_grp_cfg[i] = []
             else:
@@ -635,7 +636,7 @@ def _normalize_groups(grp_cfg):
                                 type_utils.obj_name(i))
         grp_cfg = c_grp_cfg
     groups = {}
-    if isinstance(grp_cfg, (dict)):
+    if isinstance(grp_cfg, dict):
         for (grp_name, grp_members) in grp_cfg.items():
             groups[grp_name] = util.uniq_merge_sorted(grp_members)
     else:
@@ -661,29 +662,29 @@
 # entry 'default' which will be marked as true
 # all other users will be marked as false.
 def _normalize_users(u_cfg, def_user_cfg=None):
-    if isinstance(u_cfg, (dict)):
+    if isinstance(u_cfg, dict):
         ad_ucfg = []
         for (k, v) in u_cfg.items():
-            if isinstance(v, (bool, int, basestring, str, float)):
+            if isinstance(v, (bool, int, float) + six.string_types):
                 if util.is_true(v):
                     ad_ucfg.append(str(k))
-            elif isinstance(v, (dict)):
+            elif isinstance(v, dict):
                 v['name'] = k
                 ad_ucfg.append(v)
             else:
                 raise TypeError(("Unmappable user value type %s"
                                  " for key %s") % (type_utils.obj_name(v), k))
         u_cfg = ad_ucfg
-    elif isinstance(u_cfg, (str, basestring)):
+    elif isinstance(u_cfg, six.string_types):
         u_cfg = util.uniq_merge_sorted(u_cfg)
 
     users = {}
     for user_config in u_cfg:
-        if isinstance(user_config, (str, basestring, list)):
+        if isinstance(user_config, (list,) + six.string_types):
            for u in util.uniq_merge(user_config):
                if u and u not in users:
                    users[u] = {}
-        elif isinstance(user_config, (dict)):
+        elif isinstance(user_config, dict):
            if 'name' in user_config:
                n = user_config.pop('name')
                prev_config = users.get(n) or {}
@@ -784,11 +785,11 @@ def normalize_users_groups(cfg, distro):
         old_user = cfg['user']
         # Translate it into the format that is more useful
         # going forward
-        if isinstance(old_user, (basestring, str)):
+        if isinstance(old_user, six.string_types):
             old_user = {
                 'name': old_user,
             }
-        if not isinstance(old_user, (dict)):
+        if not isinstance(old_user, dict):
             LOG.warn(("Format for 'user' key must be a string or "
                       "dictionary and not %s"), type_utils.obj_name(old_user))
             old_user = {}
@@ -813,7 +814,7 @@ def normalize_users_groups(cfg, distro):
         default_user_config = util.mergemanydict([old_user, distro_user_config])
 
     base_users = cfg.get('users', [])
-    if not isinstance(base_users, (list, dict, str, basestring)):
+    if not isinstance(base_users, (list, dict) + six.string_types):
         LOG.warn(("Format for 'users' key must be a comma separated string"
                   " or a dictionary or a list and not %s"),
                  type_utils.obj_name(base_users))
@@ -822,12 +823,12 @@ def normalize_users_groups(cfg, distro):
     if old_user:
         # Ensure that when user: is provided that this user
         # always gets added (as the default user)
-        if isinstance(base_users, (list)):
+        if isinstance(base_users, list):
             # Just add it on at the end...
             base_users.append({'name': 'default'})
-        elif isinstance(base_users, (dict)):
+        elif isinstance(base_users, dict):
             base_users['default'] = dict(base_users).get('default', True)
-        elif isinstance(base_users, (str, basestring)):
+        elif isinstance(base_users, six.string_types):
             # Just append it on to be re-parsed later
             base_users += ",default"
 
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 68bf1aab..e540e0bc 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -66,7 +66,7 @@ class Distro(distros.Distro):
                                                       settings, entries)
         dev_names = entries.keys()
         # Format for netctl
-        for (dev, info) in entries.iteritems():
+        for (dev, info) in entries.items():
             nameservers = []
             net_fn = self.network_conf_dir + dev
             net_cfg = {
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index f1b4a256..4c484639 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -16,7 +16,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+import six
+from six import StringIO
 
 import re
 
@@ -203,8 +204,9 @@ class Distro(distros.Distro):
 
         redact_opts = ['passwd']
 
-        for key, val in kwargs.iteritems():
-            if key in adduser_opts and val and isinstance(val, basestring):
+        for key, val in kwargs.items():
+            if (key in adduser_opts and val
+                    and isinstance(val, six.string_types)):
                 adduser_cmd.extend([adduser_opts[key], val])
 
         # Redact certain fields from the logs
@@ -271,7 +273,7 @@ class Distro(distros.Distro):
         nameservers = []
         searchdomains = []
         dev_names = entries.keys()
-        for (device, info) in entries.iteritems():
+        for (device, info) in entries.items():
             # Skip the loopback interface.
             if device.startswith('lo'):
                 continue
@@ -323,7 +325,7 @@ class Distro(distros.Distro):
                     resolvconf.add_search_domain(domain)
                 except ValueError:
                     util.logexc(LOG, "Failed to add search domain %s", domain)
-        util.write_file(self.resolv_conf_fn, str(resolvconf), 0644)
+        util.write_file(self.resolv_conf_fn, str(resolvconf), 0o644)
 
         return dev_names
 
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
index 8b28e2d1..cadfa6b6 100644
--- a/cloudinit/distros/net_util.py
+++ b/cloudinit/distros/net_util.py
@@ -103,7 +103,7 @@ def translate_network(settings):
             consume[cmd] = args
     # Check if anything left over to consume
     absorb = False
-    for (cmd, args) in consume.iteritems():
+    for (cmd, args) in consume.items():
         if cmd == 'iface':
             absorb = True
     if absorb:
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
index 617b3c36..84a1de42 100644
--- a/cloudinit/distros/parsers/hostname.py
+++ b/cloudinit/distros/parsers/hostname.py
@@ -16,7 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+from six import StringIO
 
 from cloudinit.distros.parsers import chop_comment
 
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
index 94c97051..3c5498ee 100644
--- a/cloudinit/distros/parsers/hosts.py
+++ b/cloudinit/distros/parsers/hosts.py
@@ -16,7 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+from six import StringIO
 
 from cloudinit.distros.parsers import chop_comment
 
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 5733c25a..8aee03a4 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -16,7 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+from six import StringIO
 
 from cloudinit import util
 
diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py
index 20ca1871..d795e12f 100644
--- a/cloudinit/distros/parsers/sys_conf.py
+++ b/cloudinit/distros/parsers/sys_conf.py
@@ -16,7 +16,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+import six
+from six import StringIO
 
 import pipes
 import re
@@ -69,7 +70,7 @@ class SysConf(configobj.ConfigObj):
         return out_contents.getvalue()
 
     def _quote(self, value, multiline=False):
-        if not isinstance(value, (str, basestring)):
+        if not isinstance(value, six.string_types):
             raise ValueError('Value "%s" is not a string' % (value))
         if len(value) == 0:
             return ''
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index d9588632..7408989c 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -73,7 +73,7 @@ class Distro(distros.Distro):
         searchservers = []
         dev_names = entries.keys()
         use_ipv6 = False
-        for (dev, info) in entries.iteritems():
+        for (dev, info) in entries.items():
             net_fn = self.network_script_tpl % (dev)
             net_cfg = {
                 'DEVICE': dev,
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
index 43682a12..0c6d1203 100644
--- a/cloudinit/distros/sles.py
+++ b/cloudinit/distros/sles.py
@@ -62,7 +62,7 @@ class Distro(distros.Distro):
         nameservers = []
         searchservers = []
         dev_names = entries.keys()
-        for (dev, info) in entries.iteritems():
+        for (dev, info) in entries.items():
             net_fn = self.network_script_tpl % (dev)
             mode = info.get('auto')
             if mode and mode.lower() == 'true':
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index e69d06ff..e1ed4091 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -17,7 +17,6 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 import functools
-import httplib
 import json
 
 from cloudinit import log as logging
@@ -25,7 +24,7 @@ from cloudinit import url_helper
 from cloudinit import util
 
 LOG = logging.getLogger(__name__)
-SKIP_USERDATA_CODES = frozenset([httplib.NOT_FOUND])
+SKIP_USERDATA_CODES = frozenset([url_helper.NOT_FOUND])
 
 
 class MetadataLeafDecoder(object):
@@ -123,7 +122,7 @@ class MetadataMaterializer(object):
         leaf_contents = {}
         for (field, resource) in leaves.items():
             leaf_url = url_helper.combine_url(base_url, resource)
-            leaf_blob = str(self._caller(leaf_url))
+            leaf_blob = self._caller(leaf_url).contents
             leaf_contents[field] = self._leaf_decoder(field, leaf_blob)
         joined = {}
         joined.update(child_contents)
@@ -160,7 +159,7 @@ def get_instance_userdata(api_version='latest',
                                       timeout=timeout,
                                       retries=retries,
                                       exception_cb=exception_cb)
-        user_data = str(response)
+        user_data = response.contents
     except url_helper.UrlError as e:
         if e.code not in SKIP_USERDATA_CODES:
             util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
@@ -183,7 +182,7 @@ def get_instance_metadata(api_version='latest',
 
     try:
         response = caller(md_url)
-        materializer = MetadataMaterializer(str(response),
+        materializer = MetadataMaterializer(response.contents,
                                             md_url, caller,
                                             leaf_decoder=leaf_decoder)
        md = materializer.materialize()
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 059d7495..d67a70ea 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -147,7 +147,7 @@ def walker_handle_handler(pdata, _ctype, _filename, payload):
     if not modfname.endswith(".py"):
         modfname = "%s.py" % (modfname)
     # TODO(harlowja): Check if path exists??
-    util.write_file(modfname, payload, 0600)
+    util.write_file(modfname, payload, 0o600)
     handlers = pdata['handlers']
     try:
         mod = fixup_handler(importer.import_module(modname))
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index 3a50cf87..a4ea47ac 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -50,7 +50,7 @@ class BootHookPartHandler(handlers.Handler):
         filepath = os.path.join(self.boothook_dir, filename)
         contents = util.strip_prefix_suffix(util.dos2unix(payload),
                                             prefix=BOOTHOOK_PREFIX)
-        util.write_file(filepath, contents.lstrip(), 0700)
+        util.write_file(filepath, contents.lstrip(), 0o700)
         return filepath
 
     def handle_part(self, data, ctype, filename, payload, frequency):
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index bf994e33..07b6d0e0 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -95,7 +95,7 @@ class CloudConfigPartHandler(handlers.Handler):
             lines.append(util.yaml_dumps(self.cloud_buf))
         else:
             lines = []
-        util.write_file(self.cloud_fn, "\n".join(lines), 0600)
+        util.write_file(self.cloud_fn, "\n".join(lines), 0o600)
 
     def _extract_mergers(self, payload, headers):
         merge_header_headers = ''
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index 9755ab05..b5087693 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -52,4 +52,4 @@ class ShellScriptPartHandler(handlers.Handler):
             filename = util.clean_filename(filename)
             payload = util.dos2unix(payload)
             path = os.path.join(self.script_dir, filename)
-            util.write_file(path, payload, 0700)
+            util.write_file(path, payload, 0o700)
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 50d193c4..c5bea711 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -65,7 +65,7 @@ class UpstartJobPartHandler(handlers.Handler):
         payload = util.dos2unix(payload)
         path = os.path.join(self.upstart_dir, filename)
-        util.write_file(path, payload, 0644)
+        util.write_file(path, payload, 0o644)
 
         if SUITABLE_UPSTART:
             util.subp(["initctl", "reload-configuration"], capture=False)
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index e701126e..ed396b5a 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -23,10 +23,11 @@
 from time import time
 
 import contextlib
-import io
 import os
 
-from ConfigParser import (NoSectionError, NoOptionError, RawConfigParser)
+import six
+from six.moves.configparser import (
+    NoSectionError, NoOptionError, RawConfigParser)
 
 from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
                                 CFG_ENV_NAME)
@@ -318,10 +319,10 @@ class ContentHandlers(object):
         return self.registered[content_type]
 
     def items(self):
-        return self.registered.items()
+        return list(self.registered.items())
 
-    def iteritems(self):
-        return self.registered.iteritems()
+    # XXX This should really go away.
+    iteritems = items
 
 
 class Paths(object):
@@ -449,7 +450,7 @@ class DefaultingConfigParser(RawConfigParser):
 
     def stringify(self, header=None):
         contents = ''
-        with io.BytesIO() as outputstream:
+        with six.StringIO() as outputstream:
             self.write(outputstream)
             outputstream.flush()
             contents = outputstream.getvalue()
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 622c946c..3c79b9c9 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -28,7 +28,8 @@ import collections
 import os
 import sys
 
-from StringIO import StringIO
+import six
+from six import StringIO
 
 # Logging levels for easy access
 CRITICAL = logging.CRITICAL
@@ -72,13 +73,13 @@ def setupLogging(cfg=None):
 
     log_cfgs = []
     log_cfg = cfg.get('logcfg')
-    if log_cfg and isinstance(log_cfg, (str, basestring)):
+    if log_cfg and isinstance(log_cfg, six.string_types):
         # If there is a 'logcfg' entry in the config,
         # respect it, it is the old keyname
         log_cfgs.append(str(log_cfg))
     elif "log_cfgs" in cfg:
         for a_cfg in cfg['log_cfgs']:
-            if isinstance(a_cfg, (basestring, str)):
+            if isinstance(a_cfg, six.string_types):
                 log_cfgs.append(a_cfg)
             elif isinstance(a_cfg, (collections.Iterable)):
                 cfg_str = [str(c) for c in a_cfg]
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
index 03aa1ee1..e13f55ac 100644
--- a/cloudinit/mergers/__init__.py
+++ b/cloudinit/mergers/__init__.py
@@ -18,6 +18,8 @@
 
 import re
 
+import six
+
 from cloudinit import importer
 from cloudinit import log as logging
 from cloudinit import type_utils
@@ -95,7 +97,7 @@ def dict_extract_mergers(config):
     raw_mergers = config.pop('merge_type', None)
     if raw_mergers is None:
         return parsed_mergers
-    if isinstance(raw_mergers, (str, basestring)):
+    if isinstance(raw_mergers, six.string_types):
         return string_extract_mergers(raw_mergers)
     for m in raw_mergers:
         if isinstance(m, (dict)):
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
index a16141fa..87cf1a72 100644
--- a/cloudinit/mergers/m_dict.py
+++ b/cloudinit/mergers/m_dict.py
@@ -16,6 +16,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+import six
+
 DEF_MERGE_TYPE = 'no_replace'
 MERGE_TYPES = ('replace', DEF_MERGE_TYPE,)
 
@@ -57,7 +59,7 @@ class Merger(object):
             return new_v
         if isinstance(new_v, (list, tuple)) and self._recurse_array:
             return self._merger.merge(old_v, new_v)
-        if isinstance(new_v, (basestring)) and self._recurse_str:
+        if isinstance(new_v, six.string_types) and self._recurse_str:
             return self._merger.merge(old_v, new_v)
         if isinstance(new_v, (dict)) and self._recurse_dict:
             return self._merger.merge(old_v, new_v)
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
index 3b87b0fc..81e5c580 100644
--- a/cloudinit/mergers/m_list.py
+++ b/cloudinit/mergers/m_list.py
@@ -16,6 +16,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+import six
+
 DEF_MERGE_TYPE = 'replace'
 MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace')
 
@@ -73,7 +75,7 @@ class Merger(object):
             return old_v
         if isinstance(new_v, (list, tuple)) and self._recurse_array:
             return self._merger.merge(old_v, new_v)
-        if isinstance(new_v, (str, basestring)) and self._recurse_str:
+        if isinstance(new_v, six.string_types) and self._recurse_str:
             return self._merger.merge(old_v, new_v)
         if isinstance(new_v, (dict)) and self._recurse_dict:
             return self._merger.merge(old_v, new_v)
@@ -82,6 +84,6 @@ class Merger(object):
         # Ok now we are replacing same indexes
         merged_list.extend(value)
         common_len = min(len(merged_list), len(merge_with))
-        for i in xrange(0, common_len):
+        for i in range(0, common_len):
             merged_list[i] = merge_same_index(merged_list[i], merge_with[i])
         return merged_list
diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py
index e22ce28a..b00c4bf3 100644
--- a/cloudinit/mergers/m_str.py
+++ b/cloudinit/mergers/m_str.py
@@ -17,6 +17,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+import six
+
 
 class Merger(object):
     def __init__(self, _merger, opts):
@@ -34,11 +36,11 @@ class Merger(object):
     # perform the following action, if appending we will
     # merge them together, otherwise we will just return value.
     def _on_str(self, value, merge_with):
-        if not isinstance(value, (basestring)):
+        if not isinstance(value, six.string_types):
             return merge_with
         if not self._append:
             return merge_with
-        if isinstance(value, unicode):
-            return value + unicode(merge_with)
+        if isinstance(value, six.text_type):
+            return value + six.text_type(merge_with)
         else:
-            return value + str(merge_with)
+            return value + six.binary_type(merge_with)
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index fb40cc0d..e30d6fb5 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -87,7 +87,7 @@ def netdev_info(empty=""):
                 devs[curdev][target] = toks[i][len(field) + 1:]
 
     if empty != "":
-        for (_devname, dev) in devs.iteritems():
+        for (_devname, dev) in devs.items():
             for field in dev:
                 if dev[field] == "":
                     dev[field] = empty
@@ -181,7 +181,7 @@ def netdev_pformat():
     else:
         fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
         tbl = PrettyTable(fields)
-        for (dev, d) in netdev.iteritems():
+        for (dev, d) in netdev.items():
             tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
             if d.get('addr6'):
                 tbl.add_row([dev, d["up"],
diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py
index 40b0c94c..0d95f506 100644
--- a/cloudinit/signal_handler.py
+++ b/cloudinit/signal_handler.py
@@ -22,7 +22,7 @@ import inspect
 import signal
 import sys
 
-from StringIO import StringIO
+from six import StringIO
 
 from cloudinit import log as logging
 from cloudinit import util
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 15244a0d..eb474079 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -216,11 +216,11 @@ def on_first_boot(data, distro=None):
     files = data.get('files', {})
     if files:
         LOG.debug("Writing %s injected files", len(files))
-        for (filename, content) in files.iteritems():
+        for (filename, content) in files.items():
             if not filename.startswith(os.sep):
                 filename = os.sep + filename
             try:
-                util.write_file(filename, content, mode=0660)
+                util.write_file(filename, content, mode=0o660)
             except IOError:
                 util.logexc(LOG, "Failed writing file: %s", filename)
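The DataSourceDigitalOcean hunk that follows drops the py2-only types.StringType check in favor of duck-typed normalization of the metadata's public keys. A standalone sketch of that pattern (the function name here is illustrative, not the datasource's API):

    def normalize_keys(public_keys):
        # Metadata may hand back a single key as a plain string or
        # several keys as a list; always return a list, without using
        # py2-only type names like StringType or basestring.
        if isinstance(public_keys, list):
            return public_keys
        return [public_keys]

    assert normalize_keys("ssh-rsa AAAA... user") == ["ssh-rsa AAAA... user"]
    assert normalize_keys(["key1", "key2"]) == ["key1", "key2"]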
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 8f27ee89..b20ce2a1 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -18,7 +18,7 @@
 from cloudinit import log as logging
 from cloudinit import util
 from cloudinit import sources
 from cloudinit import ec2_utils
-from types import StringType
+
 import functools
 
 
@@ -72,10 +72,11 @@ class DataSourceDigitalOcean(sources.DataSource):
         return "\n".join(self.metadata['vendor-data'])
 
     def get_public_ssh_keys(self):
-        if type(self.metadata['public-keys']) is StringType:
-            return [self.metadata['public-keys']]
+        public_keys = self.metadata['public-keys']
+        if isinstance(public_keys, list):
+            return public_keys
         else:
-            return self.metadata['public-keys']
+            return [public_keys]
 
     @property
     def availability_zone(self):
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 1b20ecf3..798869b7 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -156,8 +156,8 @@ class DataSourceEc2(sources.DataSource):
         # 'ephemeral0': '/dev/sdb',
         # 'root': '/dev/sda1'}
         found = None
-        bdm_items = self.metadata['block-device-mapping'].iteritems()
-        for (entname, device) in bdm_items:
+        bdm = self.metadata['block-device-mapping']
+        for (entname, device) in bdm.items():
             if entname == name:
                 found = device
                 break
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index dfe90bc6..9a3e30c5 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -262,7 +262,7 @@ def check_seed_contents(content, seed):
 
     userdata = content.get('user-data', "")
     md = {}
-    for (key, val) in content.iteritems():
+    for (key, val) in content.items():
         if key == 'user-data':
             continue
         md[key] = val
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 7ba60735..58a4b2a2 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -66,7 +66,7 @@ class DataSourceOVF(sources.DataSource):
         np = {'iso': transport_iso9660,
               'vmware-guestd': transport_vmware_guestd, }
         name = None
-        for (name, transfunc) in np.iteritems():
+        for (name, transfunc) in np.items():
             (contents, _dev, _fname) = transfunc()
             if contents:
                 break
@@ -138,7 +138,7 @@ def read_ovf_environment(contents):
     ud = ""
     cfg_props = ['password']
     md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
-    for (prop, val) in props.iteritems():
+    for (prop, val) in props.items():
         if prop == 'hostname':
             prop = "local-hostname"
         if prop in md_props:
@@ -183,7 +183,7 @@ def transport_iso9660(require_iso=True):
 
     # Go through mounts to see if it was already mounted
     mounts = util.mounts()
-    for (dev, info) in mounts.iteritems():
+    for (dev, info) in mounts.items():
         fstype = info['fstype']
         if fstype != "iso9660" and require_iso:
             continue
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 2733a2f6..7a975d78 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -30,12 +30,12 @@
 #       Comments with "@datadictionary" are snippets of the definition
 
 import base64
+import os
+import serial
+
 from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import util
-import os
-import os.path
-import serial
 
 
 LOG = logging.getLogger(__name__)
@@ -201,7 +201,7 @@ class DataSourceSmartOS(sources.DataSource):
         if b64_all is not None:
             self.b64_all = util.is_true(b64_all)
 
-        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems():
+        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
             smartos_noun, strip = attribute
             md[ci_noun] = self.query(smartos_noun, strip=strip)
 
@@ -218,11 +218,12 @@ class DataSourceSmartOS(sources.DataSource):
         user_script = os.path.join(data_d, 'user-script')
         u_script_l = "%s/user-script" % LEGACY_USER_D
         write_boot_content(md.get('user-script'), content_f=user_script,
-                           link=u_script_l, shebang=True, mode=0700)
+                           link=u_script_l, shebang=True, mode=0o700)
 
         operator_script = os.path.join(data_d, 'operator-script')
         write_boot_content(md.get('operator-script'),
-                           content_f=operator_script, shebang=False, mode=0700)
+                           content_f=operator_script, shebang=False,
+                           mode=0o700)
 
         # @datadictionary:  This key has no defined format, but its value
         # is written to the file /var/db/mdata-user-data on each boot prior
@@ -381,7 +382,7 @@ def dmi_data():
 
 def write_boot_content(content, content_f, link=None, shebang=False,
-                       mode=0400):
+                       mode=0o400):
     """
     Write the content to content_f. Under the following rules:
         1. If no content, remove the file
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 7c7ef9ab..39eab51b 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -23,6 +23,8 @@
 import abc
 import os
 
+import six
+
 from cloudinit import importer
 from cloudinit import log as logging
 from cloudinit import type_utils
@@ -130,7 +132,7 @@ class DataSource(object):
         # we want to return the correct value for what will actually
         # exist in this instance
         mappings = {"sd": ("vd", "xvd", "vtb")}
-        for (nfrom, tlist) in mappings.iteritems():
+        for (nfrom, tlist) in mappings.items():
             if not short_name.startswith(nfrom):
                 continue
             for nto in tlist:
@@ -218,18 +220,18 @@ def normalize_pubkey_data(pubkey_data):
     if not pubkey_data:
         return keys
 
-    if isinstance(pubkey_data, (basestring, str)):
+    if isinstance(pubkey_data, six.string_types):
         return str(pubkey_data).splitlines()
 
     if isinstance(pubkey_data, (list, set)):
         return list(pubkey_data)
 
     if isinstance(pubkey_data, (dict)):
-        for (_keyname, klist) in pubkey_data.iteritems():
+        for (_keyname, klist) in pubkey_data.items():
             # lp:506332 uec metadata service responds with
             # data that makes boto populate a string for 'klist' rather
             # than a list.
-            if isinstance(klist, (str, basestring)):
+            if isinstance(klist, six.string_types):
                 klist = [klist]
             if isinstance(klist, (list, set)):
                 for pkey in klist:
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index b7e19314..88c7a198 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -24,6 +24,8 @@ import copy
 import functools
 import os
 
+import six
+
 from cloudinit import ec2_utils
 from cloudinit import log as logging
 from cloudinit import sources
@@ -205,7 +207,7 @@ class BaseReader(object):
         """
 
         load_json_anytype = functools.partial(
-            util.load_json, root_types=(dict, basestring, list))
+            util.load_json, root_types=(dict, list) + six.string_types)
 
         def datafiles(version):
             files = {}
@@ -234,7 +236,7 @@ class BaseReader(object):
             'version': 2,
         }
         data = datafiles(self._find_working_version())
-        for (name, (path, required, translator)) in data.iteritems():
+        for (name, (path, required, translator)) in data.items():
             path = self._path_join(self.base_path, path)
             data = None
             found = False
@@ -364,7 +366,7 @@ class ConfigDriveReader(BaseReader):
             raise NonReadable("%s: no files found" % (self.base_path))
 
         md = {}
-        for (name, (key, translator, default)) in FILES_V1.iteritems():
+        for (name, (key, translator, default)) in FILES_V1.items():
             if name in found:
                 path = found[name]
                 try:
@@ -478,7 +480,7 @@ def convert_vendordata_json(data, recurse=True):
     """
     if not data:
         return None
-    if isinstance(data, (str, unicode, basestring)):
+    if isinstance(data, six.string_types):
         return data
     if isinstance(data, list):
         return copy.deepcopy(data)
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 14d0cb0f..9b2f5ed5 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -239,7 +239,7 @@ def setup_user_keys(keys, username, options=None):
     # Make sure the users .ssh dir is setup accordingly
     (ssh_dir, pwent) = users_ssh_info(username)
     if not os.path.isdir(ssh_dir):
-        util.ensure_dir(ssh_dir, mode=0700)
+        util.ensure_dir(ssh_dir, mode=0o700)
         util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
 
     # Turn the 'update' keys given into actual entries
@@ -252,8 +252,8 @@ def setup_user_keys(keys, username, options=None):
     (auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
     with util.SeLinuxGuard(ssh_dir, recursive=True):
         content = update_authorized_keys(auth_key_entries, key_entries)
-        util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700)
-        util.write_file(auth_key_fn, content, mode=0600)
+        util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700)
+        util.write_file(auth_key_fn, content, mode=0o600)
         util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
 
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 67f467f7..f4f4591d 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -20,12 +20,13 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-import cPickle as pickle
-
 import copy
 import os
 import sys
 
+import six
+from six.moves import cPickle as pickle
+
 from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG)
 
 from cloudinit import handlers
@@ -202,7 +203,7 @@ class Init(object):
             util.logexc(LOG, "Failed pickling datasource %s", self.datasource)
             return False
         try:
-            util.write_file(pickled_fn, pk_contents, mode=0400)
+            util.write_file(pickled_fn, pk_contents, mode=0o400)
         except Exception:
             util.logexc(LOG, "Failed pickling datasource to %s", pickled_fn)
             return False
@@ -324,15 +325,15 @@ class Init(object):
 
     def _store_userdata(self):
         raw_ud = "%s" % (self.datasource.get_userdata_raw())
-        util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0600)
+        util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0o600)
         processed_ud = "%s" % (self.datasource.get_userdata())
-        util.write_file(self._get_ipath('userdata'), processed_ud, 0600)
+        util.write_file(self._get_ipath('userdata'), processed_ud, 0o600)
 
     def _store_vendordata(self):
         raw_vd = "%s" % (self.datasource.get_vendordata_raw())
-        util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0600)
+        util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0o600)
         processed_vd = "%s" % (self.datasource.get_vendordata())
-        util.write_file(self._get_ipath('vendordata'), processed_vd, 0600)
+        util.write_file(self._get_ipath('vendordata'), processed_vd, 0o600)
 
     def _default_handlers(self, opts=None):
         if opts is None:
@@ -384,7 +385,7 @@ class Init(object):
         if not path or not os.path.isdir(path):
             return
         potential_handlers = util.find_modules(path)
-        for (fname, mod_name) in potential_handlers.iteritems():
+        for (fname, mod_name) in potential_handlers.items():
             try:
                 mod_locs, looked_locs = importer.find_module(
                     mod_name, [''], ['list_types', 'handle_part'])
@@ -422,7 +423,7 @@ class Init(object):
 
         def init_handlers():
             # Init the handlers first
-            for (_ctype, mod) in c_handlers.iteritems():
+            for (_ctype, mod) in c_handlers.items():
                 if mod in c_handlers.initialized:
                     # Avoid initing the same module twice (if said module
                     # is registered to more than one content-type).
@@ -449,7 +450,7 @@ class Init(object):
 
         def finalize_handlers():
             # Give callbacks opportunity to finalize
-            for (_ctype, mod) in c_handlers.iteritems():
+            for (_ctype, mod) in c_handlers.items():
                 if mod not in c_handlers.initialized:
                     # Said module was never inited in the first place, so lets
                     # not attempt to finalize those that never got called.
@@ -574,7 +575,7 @@ class Modules(object):
         for item in cfg_mods:
             if not item:
                 continue
-            if isinstance(item, (str, basestring)):
+            if isinstance(item, six.string_types):
                 module_list.append({
                     'mod': item.strip(),
                 })
diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py
index cc3d9495..b93efd6a 100644
--- a/cloudinit/type_utils.py
+++ b/cloudinit/type_utils.py
@@ -22,11 +22,31 @@
 
 import types
 
+import six
+
+
+if six.PY3:
+    _NAME_TYPES = (
+        types.ModuleType,
+        types.FunctionType,
+        types.LambdaType,
+        type,
+    )
+else:
+    _NAME_TYPES = (
+        types.TypeType,
+        types.ModuleType,
+        types.FunctionType,
+        types.LambdaType,
+        types.ClassType,
+    )
+
 
 def obj_name(obj):
-    if isinstance(obj, (types.TypeType,
-                        types.ModuleType,
-                        types.FunctionType,
-                        types.LambdaType)):
-        return str(obj.__name__)
-    return obj_name(obj.__class__)
+    if isinstance(obj, _NAME_TYPES):
+        return six.text_type(obj.__name__)
+    else:
+        if not hasattr(obj, '__class__'):
+            return repr(obj)
+        else:
+            return obj_name(obj.__class__)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 3074dd08..62001dff 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -20,21 +20,29 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-import httplib
 import time
-import urllib
+
+import six
 
 import requests
 from requests import exceptions
 
-from urlparse import (urlparse, urlunparse)
+from six.moves.urllib.parse import (
+    urlparse, urlunparse,
+    quote as urlquote)
 
 from cloudinit import log as logging
 from cloudinit import version
 
 LOG = logging.getLogger(__name__)
 
-NOT_FOUND = httplib.NOT_FOUND
+if six.PY2:
+    import httplib
+    NOT_FOUND = httplib.NOT_FOUND
+else:
+    import http.client
+    NOT_FOUND = http.client.NOT_FOUND
+
 
 # Check if requests has ssl support (added in requests >= 0.8.8)
 SSL_ENABLED = False
@@ -70,7 +78,7 @@ def combine_url(base, *add_ons):
         path = url_parsed[2]
         if path and not path.endswith("/"):
             path += "/"
-        path += urllib.quote(str(add_on), safe="/:")
+        path += urlquote(str(add_on), safe="/:")
         url_parsed[2] = path
         return urlunparse(url_parsed)
 
@@ -111,7 +119,7 @@ class UrlResponse(object):
 
     @property
     def contents(self):
-        return self._response.content
+        return self._response.text
 
     @property
     def url(self):
@@ -135,7 +143,7 @@ class UrlResponse(object):
         return self._response.status_code
 
     def __str__(self):
-        return self.contents
+        return self._response.text
 
 
 class UrlError(IOError):
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index de6487d8..9111bd39 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -29,6 +29,8 @@ from email.mime.multipart import MIMEMultipart
 from email.mime.nonmultipart import MIMENonMultipart
 from email.mime.text import MIMEText
 
+import six
+
 from cloudinit import handlers
 from cloudinit import log as logging
 from cloudinit import util
@@ -235,7 +237,7 @@ class UserDataProcessor(object):
                 resp = util.read_file_or_url(include_url,
                                              ssl_details=self.ssl_details)
                 if include_once_on and resp.ok():
-                    util.write_file(include_once_fn, str(resp), mode=0600)
+                    util.write_file(include_once_fn, str(resp), mode=0o600)
                 if resp.ok():
                     content = str(resp)
                 else:
@@ -256,7 +258,7 @@ class UserDataProcessor(object):
             #    filename and type not be present
             # or
             #  scalar(payload)
-            if isinstance(ent, (str, basestring)):
+            if isinstance(ent, six.string_types):
                 ent = {'content': ent}
             if not isinstance(ent, (dict)):
                 # TODO(harlowja) raise?
@@ -337,7 +339,7 @@ def convert_string(raw_data, headers=None):
         data = util.decomp_gzip(raw_data)
         if "mime-version:" in data[0:4096].lower():
             msg = email.message_from_string(data)
-            for (key, val) in headers.iteritems():
+            for (key, val) in headers.items():
                 _replace_header(msg, key, val)
         else:
             mtype = headers.get(CONTENT_TYPE, NOT_MULTIPART_TYPE)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 9efc704a..434ba7fb 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -20,8 +20,6 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
-
 import contextlib
 import copy as obj_copy
 import ctypes
@@ -45,8 +43,10 @@ import subprocess
 import sys
 import tempfile
 import time
-import urlparse
 
+from six.moves.urllib import parse as urlparse
+
+import six
 import yaml
 
 from cloudinit import importer
@@ -69,8 +69,26 @@ FN_REPLACEMENTS = {
 }
 FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
 
+TRUE_STRINGS = ('true', '1', 'on', 'yes')
+FALSE_STRINGS = ('off', '0', 'no', 'false')
+
+
 # Helper utils to see if running in a container
-CONTAINER_TESTS = ['running-in-container', 'lxc-is-container']
+CONTAINER_TESTS = ('running-in-container', 'lxc-is-container')
+
+
+def decode_binary(blob, encoding='utf-8'):
+    # Converts a binary type into a text type using given encoding.
+    if isinstance(blob, six.text_type):
+        return blob
+    return blob.decode(encoding)
+
+
+def encode_text(text, encoding='utf-8'):
+    # Converts a text string into a binary type using given encoding.
+    if isinstance(text, six.binary_type):
+        return text
+    return text.encode(encoding)
 
 
 class ProcessExecutionError(IOError):
@@ -95,7 +113,7 @@ class ProcessExecutionError(IOError):
         else:
             self.description = description
 
-        if not isinstance(exit_code, (long, int)):
+        if not isinstance(exit_code, six.integer_types):
             self.exit_code = '-'
         else:
             self.exit_code = exit_code
@@ -151,7 +169,8 @@ class SeLinuxGuard(object):
         path = os.path.realpath(self.path)
         # path should be a string, not unicode
-        path = str(path)
+        if six.PY2:
+            path = str(path)
         try:
             stats = os.lstat(path)
             self.selinux.matchpathcon(path, stats[stat.ST_MODE])
@@ -209,10 +228,10 @@ def fork_cb(child_cb, *args, **kwargs):
 def is_true(val, addons=None):
     if isinstance(val, (bool)):
         return val is True
-    check_set = ['true', '1', 'on', 'yes']
+    check_set = TRUE_STRINGS
     if addons:
-        check_set = check_set + addons
-    if str(val).lower().strip() in check_set:
+        check_set = list(check_set) + addons
+    if six.text_type(val).lower().strip() in check_set:
         return True
     return False
 
@@ -220,10 +239,10 @@ def is_true(val, addons=None):
 def is_false(val, addons=None):
     if isinstance(val, (bool)):
         return val is False
-    check_set = ['off', '0', 'no', 'false']
+    check_set = FALSE_STRINGS
     if addons:
-        check_set = check_set + addons
-    if str(val).lower().strip() in check_set:
+        check_set = list(check_set) + addons
+    if six.text_type(val).lower().strip() in check_set:
         return True
     return False
 
@@ -273,7 +292,7 @@ def uniq_merge_sorted(*lists):
 def uniq_merge(*lists):
     combined_list = []
     for a_list in lists:
-        if isinstance(a_list, (str, basestring)):
+        if isinstance(a_list, six.string_types):
             a_list = a_list.strip().split(",")
             # Kickout the empty ones
             a_list = [a for a in a_list if len(a)]
@@ -282,7 +301,7 @@ def uniq_merge(*lists):
 
 def clean_filename(fn):
-    for (k, v) in FN_REPLACEMENTS.iteritems():
+    for (k, v) in FN_REPLACEMENTS.items():
         fn = fn.replace(k, v)
     removals = []
     for k in fn:
@@ -296,14 +315,14 @@ def clean_filename(fn):
 
 def decomp_gzip(data, quiet=True):
     try:
-        buf = StringIO(str(data))
+        buf = six.BytesIO(encode_text(data))
         with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
-            return gh.read()
+            return decode_binary(gh.read())
     except Exception as e:
         if quiet:
             return data
         else:
-            raise DecompressionError(str(e))
+            raise DecompressionError(six.text_type(e))
 
@@ -362,7 +381,7 @@ def multi_log(text, console=True, stderr=True,
 
 def load_json(text, root_types=(dict,)):
-    decoded = json.loads(text)
+    decoded = json.loads(decode_binary(text))
     if not isinstance(decoded, tuple(root_types)):
         expected_types = ", ".join([str(t) for t in root_types])
         raise TypeError("(%s) root types expected, got %s instead"
@@ -394,7 +413,7 @@ def get_cfg_option_str(yobj, key, default=None):
     if key not in yobj:
         return default
     val = yobj[key]
-    if not isinstance(val, (str, basestring)):
+    if not isinstance(val, six.string_types):
         val = str(val)
     return val
 
@@ -433,7 +452,7 @@ def get_cfg_option_list(yobj, key, default=None):
     if isinstance(val, (list)):
         cval = [v for v in val]
         return cval
-    if not isinstance(val, (basestring)):
+    if not isinstance(val, six.string_types):
         val = str(val)
     return [val]
 
@@ -708,10 +727,10 @@ def read_file_or_url(url, timeout=5, retries=10,
 
 def load_yaml(blob, default=None, allowed=(dict,)):
     loaded = default
+    blob = decode_binary(blob)
     try:
-        blob = str(blob)
-        LOG.debug(("Attempting to load yaml from string "
-                   "of length %s with allowed root types %s"),
+        LOG.debug("Attempting to load yaml from string "
+                  "of length %s with allowed root types %s",
                   len(blob), allowed)
         converted = safeyaml.load(blob)
         if not isinstance(converted, allowed):
@@ -746,14 +765,12 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
     md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
     md = None
     if md_resp.ok():
-        md_str = str(md_resp)
-        md = load_yaml(md_str, default={})
+        md = load_yaml(md_resp.contents, default={})
 
     ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
     ud = None
     if ud_resp.ok():
-        ud_str = str(ud_resp)
-        ud = ud_str
+        ud = ud_resp.contents
 
     return (md, ud)
 
@@ -784,7 +801,7 @@ def read_conf_with_confd(cfgfile):
     if "conf_d" in cfg:
         confd = cfg['conf_d']
         if confd:
-            if not isinstance(confd, (str, basestring)):
+            if not isinstance(confd, six.string_types):
                 raise TypeError(("Config file %s contains 'conf_d' "
                                  "with non-string type %s") %
                                 (cfgfile, type_utils.obj_name(confd)))
@@ -921,8 +938,8 @@ def get_cmdline_url(names=('cloud-config-url', 'url'),
         return (None, None, None)
 
     resp = read_file_or_url(url)
-    if resp.contents.startswith(starts) and resp.ok():
-        return (key, url, str(resp))
+    if resp.ok() and resp.contents.startswith(starts):
+        return (key, url, resp.contents)
 
     return (key, url, None)
 
@@ -1076,9 +1093,9 @@ def uniq_list(in_list):
     return out_list
 
 
-def load_file(fname, read_cb=None, quiet=False):
+def load_file(fname, read_cb=None, quiet=False, decode=True):
     LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
-    ofh = StringIO()
+    ofh = six.BytesIO()
     try:
         with open(fname, 'rb') as ifh:
             pipe_in_out(ifh, ofh, chunk_cb=read_cb)
@@ -1089,7 +1106,10 @@ def load_file(fname, read_cb=None, quiet=False):
             raise
     contents = ofh.getvalue()
     LOG.debug("Read %s bytes from %s", len(contents), fname)
-    return contents
+    if decode:
+        return decode_binary(contents)
+    else:
+        return contents
 
 
 def get_cmdline():
@@ -1219,7 +1239,7 @@ def logexc(log, msg, *args):
 
 def hash_blob(blob, routine, mlen=None):
     hasher = hashlib.new(routine)
-    hasher.update(blob)
+    hasher.update(encode_text(blob))
     digest = hasher.hexdigest()
     # Don't get to long now
     if mlen is not None:
@@ -1280,8 +1300,7 @@ def yaml_dumps(obj, explicit_start=True, explicit_end=True):
                      indent=4,
                      explicit_start=explicit_start,
                      explicit_end=explicit_end,
-                     default_flow_style=False,
-                     allow_unicode=True)
+                     default_flow_style=False)
 
 
 def ensure_dir(path, mode=None):
@@ -1515,11 +1534,17 @@ def write_file(filename, content, mode=0o644, omode="wb"):
     @param filename: The full path of the file to write.
     @param content: The content to write to the file.
     @param mode: The filesystem mode to set on the file.
-    @param omode: The open mode used when opening the file (r, rb, a, etc.)
+    @param omode: The open mode used when opening the file (w, wb, a, etc.)
     """
     ensure_dir(os.path.dirname(filename))
-    LOG.debug("Writing to %s - %s: [%s] %s bytes",
-              filename, omode, mode, len(content))
+    if 'b' in omode.lower():
+        content = encode_text(content)
+        write_type = 'bytes'
+    else:
+        content = decode_binary(content)
+        write_type = 'characters'
+    LOG.debug("Writing to %s - %s: [%s] %s %s",
+              filename, omode, mode, len(content), write_type)
     with SeLinuxGuard(path=filename):
         with open(filename, omode) as fh:
             fh.write(content)
@@ -1608,10 +1633,10 @@ def shellify(cmdlist, add_header=True):
         if isinstance(args, list):
             fixed = []
             for f in args:
-                fixed.append("'%s'" % (str(f).replace("'", escaped)))
+                fixed.append("'%s'" % (six.text_type(f).replace("'", escaped)))
             content = "%s%s\n" % (content, ' '.join(fixed))
             cmds_made += 1
-        elif isinstance(args, (str, basestring)):
+        elif isinstance(args, six.string_types):
             content = "%s%s\n" % (content, args)
             cmds_made += 1
         else:
@@ -1722,7 +1747,7 @@ def expand_package_list(version_fmt, pkgs):
 
     pkglist = []
     for pkg in pkgs:
-        if isinstance(pkg, basestring):
+        if isinstance(pkg, six.string_types):
             pkglist.append(pkg)
             continue
diff --git a/packages/bddeb b/packages/bddeb
index 9d264f92..83ca68bb 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -38,6 +38,7 @@ PKG_MP = {
     'pyserial': 'python-serial',
     'pyyaml': 'python-yaml',
     'requests': 'python-requests',
+    'six': 'python-six',
 }
 DEBUILD_ARGS = ["-S", "-d"]
 
diff --git a/packages/brpm b/packages/brpm
index 9657b1dd..72bfca08 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -45,6 +45,7 @@ PKG_MP = {
         'pyserial': 'pyserial',
         'pyyaml': 'PyYAML',
         'requests': 'python-requests',
+        'six': 'python-six',
     },
     'suse': {
         'argparse': 'python-argparse',
@@ -56,6 +57,7 @@ PKG_MP = {
         'pyserial': 'python-pyserial',
         'pyyaml': 'python-yaml',
         'requests': 'python-requests',
+        'six': 'python-six',
     }
 }
 
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 03296e62..a35afc27 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -1,11 +1,11 @@
 """Tests for handling of userdata within cloud init."""
 
-import StringIO
-
 import gzip
 import logging
 import os
 
+from six import BytesIO, StringIO
+
 from email.mime.application import MIMEApplication
 from email.mime.base import MIMEBase
 from email.mime.multipart import MIMEMultipart
@@ -53,7 +53,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
         self.patchUtils(root)
 
     def capture_log(self, lvl=logging.DEBUG):
-        log_file = StringIO.StringIO()
+        log_file = StringIO()
         self._log_handler = logging.StreamHandler(log_file)
         self._log_handler.setLevel(lvl)
         self._log = log.getLogger()
@@ -351,9 +351,9 @@ p: 1
         """Tests that individual message gzip encoding works."""
 
         def gzip_part(text):
-            contents = StringIO.StringIO()
- f = gzip.GzipFile(fileobj=contents, mode='w') - f.write(str(text)) + contents = BytesIO() + f = gzip.GzipFile(fileobj=contents, mode='wb') + f.write(util.encode_text(text)) f.flush() f.close() return MIMEApplication(contents.getvalue(), 'gzip') diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index e9235951..ae9e6c22 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -85,7 +85,7 @@ class TestNoCloudDataSource(MockerTestCase): data = { 'fs_label': None, - 'meta-data': {'instance-id': 'IID'}, + 'meta-data': yaml.safe_dump({'instance-id': 'IID'}), 'user-data': "USER_DATA_RAW", } diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 49894e51..81ef1546 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -20,12 +20,11 @@ import copy import json import re -from StringIO import StringIO - -from urlparse import urlparse - from .. import helpers as test_helpers +from six import StringIO +from six.moves.urllib.parse import urlparse + from cloudinit import helpers from cloudinit import settings from cloudinit.sources import DataSourceOpenStack as ds diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py index 33a1d6e1..6e1a0b69 100644 --- a/tests/unittests/test_distros/test_netconfig.py +++ b/tests/unittests/test_distros/test_netconfig.py @@ -4,6 +4,8 @@ import mocker import os +from six import StringIO + from cloudinit import distros from cloudinit import helpers from cloudinit import settings @@ -11,8 +13,6 @@ from cloudinit import util from cloudinit.distros.parsers.sys_conf import SysConf -from StringIO import StringIO - BASE_NET_CFG = ''' auto lo diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py index 203dd2aa..f5832365 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure.py +++ b/tests/unittests/test_handler/test_handler_apt_configure.py @@ -16,12 +16,12 @@ class TestAptProxyConfig(MockerTestCase): self.cfile = os.path.join(self.tmp, "config.cfg") def _search_apt_config(self, contents, ptype, value): - print( + ## print( + ## r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value), + ## contents, "flags=re.IGNORECASE") + return re.search( r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value), - contents, "flags=re.IGNORECASE") - return(re.search( - r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value), - contents, flags=re.IGNORECASE)) + contents, flags=re.IGNORECASE) def test_apt_proxy_written(self): cfg = {'apt_proxy': 'myproxy'} diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py index eb251636..690ef86f 100644 --- a/tests/unittests/test_handler/test_handler_locale.py +++ b/tests/unittests/test_handler/test_handler_locale.py @@ -29,7 +29,7 @@ from .. 
import helpers as t_help from configobj import ConfigObj -from StringIO import StringIO +from six import BytesIO import logging @@ -59,6 +59,6 @@ class TestLocale(t_help.FilesystemMockingTestCase): cc = self._get_cloud('sles') cc_locale.handle('cc_locale', cfg, cc, LOG, []) - contents = util.load_file('/etc/sysconfig/language') - n_cfg = ConfigObj(StringIO(contents)) + contents = util.load_file('/etc/sysconfig/language', decode=False) + n_cfg = ConfigObj(BytesIO(contents)) self.assertEquals({'RC_LANG': cfg['locale']}, dict(n_cfg)) diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py index 40481f16..579377fb 100644 --- a/tests/unittests/test_handler/test_handler_seed_random.py +++ b/tests/unittests/test_handler/test_handler_seed_random.py @@ -22,7 +22,7 @@ import base64 import gzip import tempfile -from StringIO import StringIO +from six import StringIO from cloudinit import cloud from cloudinit import distros diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py index e1530e30..a9f7829b 100644 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -9,7 +9,7 @@ from .. import helpers as t_help import logging -from StringIO import StringIO +from six import BytesIO from configobj import ConfigObj @@ -38,8 +38,8 @@ class TestHostname(t_help.FilesystemMockingTestCase): cc_set_hostname.handle('cc_set_hostname', cfg, cc, LOG, []) if not distro.uses_systemd(): - contents = util.load_file("/etc/sysconfig/network") - n_cfg = ConfigObj(StringIO(contents)) + contents = util.load_file("/etc/sysconfig/network", decode=False) + n_cfg = ConfigObj(BytesIO(contents)) self.assertEquals({'HOSTNAME': 'blah.blah.blah.yahoo.com'}, dict(n_cfg)) diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py index 874db340..10ea2040 100644 --- a/tests/unittests/test_handler/test_handler_timezone.py +++ b/tests/unittests/test_handler/test_handler_timezone.py @@ -29,7 +29,7 @@ from .. import helpers as t_help from configobj import ConfigObj -from StringIO import StringIO +from six import BytesIO import logging @@ -67,8 +67,8 @@ class TestTimezone(t_help.FilesystemMockingTestCase): cc_timezone.handle('cc_timezone', cfg, cc, LOG, []) - contents = util.load_file('/etc/sysconfig/clock') - n_cfg = ConfigObj(StringIO(contents)) + contents = util.load_file('/etc/sysconfig/clock', decode=False) + n_cfg = ConfigObj(BytesIO(contents)) self.assertEquals({'TIMEZONE': cfg['timezone']}, dict(n_cfg)) contents = util.load_file('/etc/localtime') diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py index 435c9787..81806ad1 100644 --- a/tests/unittests/test_handler/test_handler_yum_add_repo.py +++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py @@ -6,7 +6,7 @@ from .. 
import helpers import logging -from StringIO import StringIO +from six import BytesIO import configobj @@ -52,8 +52,9 @@ class TestConfig(helpers.FilesystemMockingTestCase): } self.patchUtils(self.tmp) cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, []) - contents = util.load_file("/etc/yum.repos.d/epel_testing.repo") - contents = configobj.ConfigObj(StringIO(contents)) + contents = util.load_file("/etc/yum.repos.d/epel_testing.repo", + decode=False) + contents = configobj.ConfigObj(BytesIO(contents)) expected = { 'epel_testing': { 'name': 'Extra Packages for Enterprise Linux 5 - Testing', -- cgit v1.2.3 From 3b798b5d5c3caa5d0e8e534855e29010ca932aaa Mon Sep 17 00:00:00 2001 From: Barry Warsaw Date: Thu, 22 Jan 2015 21:21:04 -0500 Subject: Low hanging Python 3 fruit. --- cloudinit/config/cc_ca_certs.py | 4 ++-- cloudinit/config/cc_chef.py | 6 ++++-- cloudinit/distros/__init__.py | 12 ++++++++++-- cloudinit/distros/debian.py | 2 +- cloudinit/distros/rhel_util.py | 4 ++-- cloudinit/distros/sles.py | 2 +- cloudinit/sources/DataSourceAltCloud.py | 12 ++++++------ cloudinit/sources/DataSourceAzure.py | 4 ++-- cloudinit/sources/DataSourceMAAS.py | 10 ++++++---- cloudinit/sources/DataSourceOpenNebula.py | 2 +- cloudinit/templater.py | 2 +- cloudinit/util.py | 7 +++++-- templates/resolv.conf.tmpl | 2 +- tests/unittests/helpers.py | 4 ++-- tests/unittests/test_datasource/test_configdrive.py | 2 +- tests/unittests/test_datasource/test_digitalocean.py | 7 +++---- tests/unittests/test_datasource/test_gce.py | 2 +- tests/unittests/test_datasource/test_opennebula.py | 2 +- tests/unittests/test_datasource/test_smartos.py | 4 +++- .../unittests/test_handler/test_handler_apt_configure.py | 2 +- tests/unittests/test_merging.py | 16 +++++++++------- tools/ccfg-merge-debug | 4 ++-- 22 files changed, 65 insertions(+), 47 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py index 4f2a46a1..8248b020 100644 --- a/cloudinit/config/cc_ca_certs.py +++ b/cloudinit/config/cc_ca_certs.py @@ -44,7 +44,7 @@ def add_ca_certs(certs): if certs: # First ensure they are strings... cert_file_contents = "\n".join([str(c) for c in certs]) - util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0644) + util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644) # Append cert filename to CA_CERT_CONFIG file. 
# We have to strip the content because blank lines in the file @@ -63,7 +63,7 @@ def remove_default_ca_certs(): """ util.delete_dir_contents(CA_CERT_PATH) util.delete_dir_contents(CA_CERT_SYSTEM_PATH) - util.write_file(CA_CERT_CONFIG, "", mode=0644) + util.write_file(CA_CERT_CONFIG, "", mode=0o644) debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no" util.subp(('debconf-set-selections', '-'), debconf_sel) diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py index fc837363..584199e5 100644 --- a/cloudinit/config/cc_chef.py +++ b/cloudinit/config/cc_chef.py @@ -76,6 +76,8 @@ from cloudinit import templater from cloudinit import url_helper from cloudinit import util +import six + RUBY_VERSION_DEFAULT = "1.8" CHEF_DIRS = tuple([ @@ -261,7 +263,7 @@ def run_chef(chef_cfg, log): cmd_args = chef_cfg['exec_arguments'] if isinstance(cmd_args, (list, tuple)): cmd.extend(cmd_args) - elif isinstance(cmd_args, (str, basestring)): + elif isinstance(cmd_args, six.string_types): cmd.append(cmd_args) else: log.warn("Unknown type %s provided for chef" @@ -300,7 +302,7 @@ def install_chef(cloud, chef_cfg, log): with util.tempdir() as tmpd: # Use tmpdir over tmpfile to avoid 'text file busy' on execute tmpf = "%s/chef-omnibus-install" % tmpd - util.write_file(tmpf, str(content), mode=0700) + util.write_file(tmpf, str(content), mode=0o700) util.subp([tmpf], capture=False) else: log.warn("Unknown chef install type '%s'", install_type) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 4ebccdda..6b96d58c 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -25,7 +25,6 @@ import six from six import StringIO import abc -import itertools import os import re @@ -37,6 +36,15 @@ from cloudinit import util from cloudinit.distros.parsers import hosts +try: + # Python 3 + from six import filter +except ImportError: + # Python 2 + from itertools import ifilter as filter + + + OSFAMILIES = { 'debian': ['debian', 'ubuntu'], 'redhat': ['fedora', 'rhel'], @@ -853,7 +861,7 @@ def extract_default(users, default_name=None, default_config=None): return config['default'] tmp_users = users.items() - tmp_users = dict(itertools.ifilter(safe_find, tmp_users)) + tmp_users = dict(filter(safe_find, tmp_users)) if not tmp_users: return (default_name, default_config) else: diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index b09eb094..6d3a82bf 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -97,7 +97,7 @@ class Distro(distros.Distro): if not conf: conf = HostnameConf('') conf.set_hostname(your_hostname) - util.write_file(out_fn, str(conf), 0644) + util.write_file(out_fn, str(conf), 0o644) def _read_system_hostname(self): sys_hostname = self._read_hostname(self.hostname_conf_fn) diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py index 063d536e..903d7793 100644 --- a/cloudinit/distros/rhel_util.py +++ b/cloudinit/distros/rhel_util.py @@ -50,7 +50,7 @@ def update_sysconfig_file(fn, adjustments, allow_empty=False): ] if not exists: lines.insert(0, util.make_header()) - util.write_file(fn, "\n".join(lines) + "\n", 0644) + util.write_file(fn, "\n".join(lines) + "\n", 0o644) # Helper function to read a RHEL/SUSE /etc/sysconfig/* file @@ -86,4 +86,4 @@ def update_resolve_conf_file(fn, dns_servers, search_servers): r_conf.add_search_domain(s) except ValueError: util.logexc(LOG, "Failed at adding search domain %s", s) - util.write_file(fn, str(r_conf), 0644) + 
util.write_file(fn, str(r_conf), 0o644) diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py index 0c6d1203..620c974c 100644 --- a/cloudinit/distros/sles.py +++ b/cloudinit/distros/sles.py @@ -113,7 +113,7 @@ class Distro(distros.Distro): if not conf: conf = HostnameConf('') conf.set_hostname(hostname) - util.write_file(out_fn, str(conf), 0644) + util.write_file(out_fn, str(conf), 0o644) def _read_system_hostname(self): host_fn = self.hostname_conf_fn diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index 1e913a6e..69053d0b 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -124,11 +124,11 @@ class DataSourceAltCloud(sources.DataSource): cmd = CMD_DMI_SYSTEM try: (cmd_out, _err) = util.subp(cmd) - except ProcessExecutionError, _err: + except ProcessExecutionError as _err: LOG.debug(('Failed command: %s\n%s') % \ (' '.join(cmd), _err.message)) return 'UNKNOWN' - except OSError, _err: + except OSError as _err: LOG.debug(('Failed command: %s\n%s') % \ (' '.join(cmd), _err.message)) return 'UNKNOWN' @@ -211,11 +211,11 @@ class DataSourceAltCloud(sources.DataSource): cmd = CMD_PROBE_FLOPPY (cmd_out, _err) = util.subp(cmd) LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out)) - except ProcessExecutionError, _err: + except ProcessExecutionError as _err: util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err.message) return False - except OSError, _err: + except OSError as _err: util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err.message) return False @@ -228,11 +228,11 @@ class DataSourceAltCloud(sources.DataSource): cmd.append('--exit-if-exists=' + floppy_dev) (cmd_out, _err) = util.subp(cmd) LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out)) - except ProcessExecutionError, _err: + except ProcessExecutionError as _err: util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err.message) return False - except OSError, _err: + except OSError as _err: util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd), _err.message) return False diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 09bc196d..29ae2c22 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -151,7 +151,7 @@ class DataSourceAzureNet(sources.DataSource): # walinux agent writes files world readable, but expects # the directory to be protected. - write_files(ddir, files, dirmode=0700) + write_files(ddir, files, dirmode=0o700) # handle the hostname 'publishing' try: @@ -390,7 +390,7 @@ def write_files(datadir, files, dirmode=None): util.ensure_dir(datadir, dirmode) for (name, content) in files.items(): util.write_file(filename=os.path.join(datadir, name), - content=content, mode=0600) + content=content, mode=0o600) def invoke_agent(cmd): diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 9a3e30c5..8f9c81de 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -18,6 +18,8 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
+from __future__ import print_function + from email.utils import parsedate import errno import oauth.oauth as oauth @@ -361,7 +363,7 @@ if __name__ == "__main__": return (urllib2.urlopen(req).read()) def printurl(url, headers_cb): - print "== %s ==\n%s\n" % (url, geturl(url, headers_cb)) + print("== %s ==\n%s\n" % (url, geturl(url, headers_cb))) def crawl(url, headers_cb=None): if url.endswith("/"): @@ -386,9 +388,9 @@ if __name__ == "__main__": version=args.apiver) else: (userdata, metadata) = read_maas_seed_url(args.url) - print "=== userdata ===" - print userdata - print "=== metadata ===" + print("=== userdata ===") + print(userdata) + print("=== metadata ===") pprint.pprint(metadata) elif args.subcmd == "get": diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index e2469f6e..f9dac29e 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -280,7 +280,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None, # allvars expands to all existing variables by using '${!x*}' notation # where x is lower or upper case letters or '_' - allvars = ["${!%s*}" % x for x in string.letters + "_"] + allvars = ["${!%s*}" % x for x in string.ascii_letters + "_"] keylist_in = keylist if keylist is None: diff --git a/cloudinit/templater.py b/cloudinit/templater.py index 4cd3f13d..a9231482 100644 --- a/cloudinit/templater.py +++ b/cloudinit/templater.py @@ -137,7 +137,7 @@ def render_from_file(fn, params): return renderer(content, params) -def render_to_file(fn, outfn, params, mode=0644): +def render_to_file(fn, outfn, params, mode=0o644): contents = render_from_file(fn, params) util.write_file(outfn, contents, mode=mode) diff --git a/cloudinit/util.py b/cloudinit/util.py index 434ba7fb..94fd5c70 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -142,6 +142,9 @@ class ProcessExecutionError(IOError): 'reason': self.reason, } IOError.__init__(self, message) + # For backward compatibility with Python 2. 
+ if not hasattr(self, 'message'): + self.message = message class SeLinuxGuard(object): @@ -260,7 +263,7 @@ def translate_bool(val, addons=None): def rand_str(strlen=32, select_from=None): if not select_from: - select_from = string.letters + string.digits + select_from = string.ascii_letters + string.digits return "".join([random.choice(select_from) for _x in range(0, strlen)]) @@ -1127,7 +1130,7 @@ def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None): bytes_piped = 0 while True: data = in_fh.read(chunk_size) - if data == '': + if len(data) == 0: break else: out_fh.write(data) diff --git a/templates/resolv.conf.tmpl b/templates/resolv.conf.tmpl index 1300156c..bfae80db 100644 --- a/templates/resolv.conf.tmpl +++ b/templates/resolv.conf.tmpl @@ -24,7 +24,7 @@ sortlist {% for sort in sortlist %}{{sort}} {% endfor %} {% if options or flags %} options {% for flag in flags %}{{flag}} {% endfor %} -{% for key, value in options.iteritems() -%} +{% for key, value in options.items() -%} {{key}}:{{value}} {% endfor %} {% endif %} diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 38a2176d..70b8116f 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -65,7 +65,7 @@ if PY26: def assertDictContainsSubset(self, expected, actual, msg=None): missing = [] mismatched = [] - for k, v in expected.iteritems(): + for k, v in expected.items(): if k not in actual: missing.append(k) elif actual[k] != v: @@ -243,7 +243,7 @@ class HttprettyTestCase(TestCase): def populate_dir(path, files): if not os.path.exists(path): os.makedirs(path) - for (name, content) in files.iteritems(): + for (name, content) in files.items(): with open(os.path.join(path, name), "w") as fp: fp.write(content) fp.close() diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 800c5fd8..258c68e2 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -338,7 +338,7 @@ def populate_ds_from_read_config(cfg_ds, source, results): def populate_dir(seed_dir, files): - for (name, content) in files.iteritems(): + for (name, content) in files.items(): path = os.path.join(seed_dir, name) dirname = os.path.dirname(path) if not os.path.isdir(dirname): diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py index d1270fc2..98f9cfac 100644 --- a/tests/unittests/test_datasource/test_digitalocean.py +++ b/tests/unittests/test_datasource/test_digitalocean.py @@ -18,8 +18,7 @@ import httpretty import re -from types import ListType -from urlparse import urlparse +from six.moves.urllib_parse import urlparse from cloudinit import settings from cloudinit import helpers @@ -110,7 +109,7 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase): self.assertEqual([DO_META.get('public-keys')], self.ds.get_public_ssh_keys()) - self.assertIs(type(self.ds.get_public_ssh_keys()), ListType) + self.assertIsInstance(self.ds.get_public_ssh_keys(), list) @httpretty.activate def test_multiple_ssh_keys(self): @@ -124,4 +123,4 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase): self.assertEqual(DO_META.get('public-keys').splitlines(), self.ds.get_public_ssh_keys()) - self.assertIs(type(self.ds.get_public_ssh_keys()), ListType) + self.assertIsInstance(self.ds.get_public_ssh_keys(), list) diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 
06050bb1..aa60eb33 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -19,7 +19,7 @@ import httpretty import re from base64 import b64encode, b64decode -from urlparse import urlparse +from six.moves.urllib_parse import urlparse from cloudinit import settings from cloudinit import helpers diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index ddf77265..b79237f0 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -294,7 +294,7 @@ class TestParseShellConfig(unittest.TestCase): def populate_context_dir(path, variables): data = "# Context variables generated by OpenNebula\n" - for (k, v) in variables.iteritems(): + for (k, v) in variables.items(): data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''"))) populate_dir(path, {'context.sh': data}) diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 35d7ef5e..01b9b73e 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -22,6 +22,8 @@ # return responses. # +from __future__ import print_function + import base64 from cloudinit import helpers as c_helpers from cloudinit.sources import DataSourceSmartOS @@ -369,7 +371,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:] if re.match(r'.*\/mdata-user-data$', name_f): found_new = True - print name_f + print(name_f) self.assertEquals(permissions, '400') self.assertFalse(found_new) diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py index 2c3dad72..d72fa8c7 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure.py +++ b/tests/unittests/test_handler/test_handler_apt_configure.py @@ -62,7 +62,7 @@ class TestAptProxyConfig(unittest.TestCase): contents = str(util.read_file_or_url(self.pfile)) - for ptype, pval in values.iteritems(): + for ptype, pval in values.items(): self.assertTrue(self._search_apt_config(contents, ptype, pval)) def test_proxy_deleted(self): diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py index 07b610f7..976d8283 100644 --- a/tests/unittests/test_merging.py +++ b/tests/unittests/test_merging.py @@ -11,11 +11,13 @@ import glob import os import random import re +import six import string SOURCE_PAT = "source*.*yaml" EXPECTED_PAT = "expected%s.yaml" -TYPES = [long, int, dict, str, list, tuple, None] +TYPES = [dict, str, list, tuple, None] +TYPES.extend(six.integer_types) def _old_mergedict(src, cand): @@ -25,7 +27,7 @@ def _old_mergedict(src, cand): Nested dictionaries are merged recursively. 
""" if isinstance(src, dict) and isinstance(cand, dict): - for (k, v) in cand.iteritems(): + for (k, v) in cand.items(): if k not in src: src[k] = v else: @@ -42,8 +44,8 @@ def _old_mergemanydict(*args): def _random_str(rand): base = '' - for _i in xrange(rand.randint(1, 2 ** 8)): - base += rand.choice(string.letters + string.digits) + for _i in range(rand.randint(1, 2 ** 8)): + base += rand.choice(string.ascii_letters + string.digits) return base @@ -64,7 +66,7 @@ def _make_dict(current_depth, max_depth, rand): if t in [dict, list, tuple]: if t in [dict]: amount = rand.randint(0, 5) - keys = [_random_str(rand) for _i in xrange(0, amount)] + keys = [_random_str(rand) for _i in range(0, amount)] base = {} for k in keys: try: @@ -74,14 +76,14 @@ def _make_dict(current_depth, max_depth, rand): elif t in [list, tuple]: base = [] amount = rand.randint(0, 5) - for _i in xrange(0, amount): + for _i in range(0, amount): try: base.append(_make_dict(current_depth + 1, max_depth, rand)) except _NoMoreException: pass if t in [tuple]: base = tuple(base) - elif t in [long, int]: + elif t in six.integer_types: base = rand.randint(0, 2 ** 8) elif t in [str]: base = _random_str(rand) diff --git a/tools/ccfg-merge-debug b/tools/ccfg-merge-debug index 85227da7..1f08e0cb 100755 --- a/tools/ccfg-merge-debug +++ b/tools/ccfg-merge-debug @@ -51,7 +51,7 @@ def main(): c_handlers.register(ccph) called = [] - for (_ctype, mod) in c_handlers.iteritems(): + for (_ctype, mod) in c_handlers.items(): if mod in called: continue handlers.call_begin(mod, data, frequency) @@ -76,7 +76,7 @@ def main(): # Give callbacks opportunity to finalize called = [] - for (_ctype, mod) in c_handlers.iteritems(): + for (_ctype, mod) in c_handlers.items(): if mod in called: continue handlers.call_end(mod, data, frequency) -- cgit v1.2.3 From 09e81d572d8461d8546f66eacd005bf3c9ae0e39 Mon Sep 17 00:00:00 2001 From: Marco Morais Date: Thu, 22 Jan 2015 22:25:49 -0800 Subject: Make parameter list for get_hostname method consistent The sources.DataSource class has method defined as: def get_hostname(self, fqdn=False, resolve_ip=False) Make the parameter list for this method in DataSourceDigitalOcean and DataSourceGCE consistent with superclass sources.DataSource. 
--- cloudinit/sources/DataSourceDigitalOcean.py | 2 +- cloudinit/sources/DataSourceGCE.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 8f27ee89..fec9db4b 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -84,7 +84,7 @@ class DataSourceDigitalOcean(sources.DataSource): def get_instance_id(self): return self.metadata['id'] - def get_hostname(self, fqdn=False): + def get_hostname(self, fqdn=False, resolve_ip=False): return self.metadata['hostname'] def get_package_mirror_info(self): diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 2cf8fdcd..6936c74e 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -126,7 +126,7 @@ class DataSourceGCE(sources.DataSource): def get_public_ssh_keys(self): return self.metadata['public-keys'] - def get_hostname(self, fqdn=False, _resolve_ip=False): + def get_hostname(self, fqdn=False, resolve_ip=False): # GCE has long FDQN's and has asked for short hostnames return self.metadata['local-hostname'].split('.')[0] -- cgit v1.2.3 From de5974fe93dd717e0c7ba6de17db3192cc258cff Mon Sep 17 00:00:00 2001 From: Barry Warsaw Date: Mon, 26 Jan 2015 14:31:09 -0500 Subject: * More str/bytes fixes. * Temporarily skip the MAAS tests in py3 since they need to be ported to oauthlib. --- cloudinit/sources/DataSourceOpenNebula.py | 12 +++++++++--- cloudinit/sources/DataSourceSmartOS.py | 15 +++++++++++++-- tests/unittests/test_datasource/test_maas.py | 7 ++++++- tests/unittests/test_datasource/test_opennebula.py | 4 ++-- 4 files changed, 30 insertions(+), 8 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index f9dac29e..691b39f8 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -379,7 +379,8 @@ def read_context_disk_dir(source_dir, asuser=None): raise BrokenContextDiskDir("configured user '%s' " "does not exist", asuser) try: - with open(os.path.join(source_dir, 'context.sh'), 'r') as f: + path = os.path.join(source_dir, 'context.sh') + with open(path, 'r', encoding='utf-8') as f: content = f.read().strip() context = parse_shell_config(content, asuser=asuser) @@ -426,14 +427,19 @@ def read_context_disk_dir(source_dir, asuser=None): context.get('USER_DATA_ENCODING')) if encoding == "base64": try: - results['userdata'] = base64.b64decode(results['userdata']) + userdata = base64.b64decode(results['userdata']) + # In Python 3 we still expect a str, but b64decode will return + # bytes. Convert to str. 
+ if isinstance(userdata, bytes): + userdata = userdata.decode('utf-8') + results['userdata'] = userdata except TypeError: LOG.warn("Failed base64 decoding of userdata") # generate static /etc/network/interfaces # only if there are any required context variables # http://opennebula.org/documentation:rel3.8:cong#network_configuration - for k in context.keys(): + for k in context: if re.match(r'^ETH\d+_IP$', k): (out, _) = util.subp(['/sbin/ip', 'link']) net = OpenNebulaNetwork(out, context) diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 7a975d78..d3ed40c5 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -30,6 +30,7 @@ # Comments with "@datadictionary" are snippets of the definition import base64 +import binascii import os import serial @@ -350,8 +351,18 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None, if b64: try: - return base64.b64decode(resp) - except TypeError: + # Generally, we want native strings in the values. Python 3's + # b64decode will return bytes though, so decode them to utf-8 if + # possible. If that fails, return the bytes. + decoded = base64.b64decode(resp) + try: + if isinstance(decoded, bytes): + return decoded.decode('utf-8') + except UnicodeDecodeError: + pass + return decoded + # Bogus input produces different errors in Python 2 and 3; catch both. + except (TypeError, binascii.Error): LOG.warn("Failed base64 decoding key '%s'", noun) return resp diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 6af0cd82..66fe22ae 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -4,7 +4,11 @@ import shutil import tempfile import unittest -from cloudinit.sources import DataSourceMAAS +# XXX DataSourceMAAS must be ported to oauthlib for Python 3 +import six +if not six.PY3: + from cloudinit.sources import DataSourceMAAS + from cloudinit import url_helper from ..helpers import populate_dir @@ -14,6 +18,7 @@ except ImportError: import mock +@unittest.skipIf(six.PY3, 'DataSourceMAAS must be ported to oauthlib') class TestMAASDataSource(unittest.TestCase): def setUp(self): diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index 31c6232f..ef534bab 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -33,7 +33,7 @@ TEST_VARS = { } INVALID_CONTEXT = ';' -USER_DATA = b'#cloud-config\napt_upgrade: true' +USER_DATA = '#cloud-config\napt_upgrade: true' SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i' HOSTNAME = 'foo.example.com' PUBLIC_IP = '10.0.0.3' @@ -300,7 +300,7 @@ class TestParseShellConfig(unittest.TestCase): def populate_context_dir(path, variables): data = "# Context variables generated by OpenNebula\n" - for (k, v) in variables.items(): + for k, v in variables.items(): data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''"))) populate_dir(path, {'context.sh': data}) -- cgit v1.2.3 From 18b35de06432869a9d859e2978e7e9567eba66a2 Mon Sep 17 00:00:00 2001 From: Barry Warsaw Date: Mon, 26 Jan 2015 14:48:23 -0500 Subject: Another handling of b64decode. Also, restore Python 2 compatibility. 
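
The decoding pattern adopted in these fixes, as a standalone sketch (illustrative only): decode base64 to a native str when the payload is UTF-8, and fall back to bytes otherwise, so the helper behaves the same on Python 2 and Python 3.

    import base64

    def tolerant_b64decode(data):
        decoded = base64.b64decode(data)
        try:
            # Prefer a native text string when the payload allows it.
            return decoded.decode('utf-8')
        except UnicodeDecodeError:
            return decoded
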
--- cloudinit/config/cc_seed_random.py | 8 +++++++- cloudinit/sources/DataSourceOpenNebula.py | 11 ++++++++++- 2 files changed, 17 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 3b7235bf..981e1b08 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -38,7 +38,13 @@ def _decode(data, encoding=None): if not encoding or encoding.lower() in ['raw']: return data elif encoding.lower() in ['base64', 'b64']: - return base64.b64decode(data) + # Try to give us a native string in both Python 2 and 3, and remember + # that b64decode() returns bytes in Python 3. + decoded = base64.b64decode(data) + try: + return decoded.decode('utf-8') + except UnicodeDecodeError: + return decoded elif encoding.lower() in ['gzip', 'gz']: return util.decomp_gzip(data, quiet=False) else: diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 691b39f8..6da569ec 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -25,6 +25,7 @@ # along with this program. If not, see . import base64 +import codecs import os import pwd import re @@ -34,6 +35,8 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import util +import six + LOG = logging.getLogger(__name__) DEFAULT_IID = "iid-dsopennebula" @@ -43,6 +46,12 @@ CONTEXT_DISK_FILES = ["context.sh"] VALID_DSMODES = ("local", "net", "disabled") +def utf8_open(path): + if six.PY3: + return open(path, 'r', encoding='utf-8') + return codecs.open(path, 'r', encoding='utf-8') + + class DataSourceOpenNebula(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -380,7 +389,7 @@ def read_context_disk_dir(source_dir, asuser=None): "does not exist", asuser) try: path = os.path.join(source_dir, 'context.sh') - with open(path, 'r', encoding='utf-8') as f: + with utf8_open(path) as f: content = f.read().strip() context = parse_shell_config(content, asuser=asuser) -- cgit v1.2.3 From fabff4aec884467729fc372bb67f240752c15511 Mon Sep 17 00:00:00 2001 From: Barry Warsaw Date: Mon, 26 Jan 2015 16:37:29 -0500 Subject: Port the MAAS code to oauthlib. 
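
For reference, a minimal sketch of PLAINTEXT request signing with oauthlib, using hypothetical credentials and URL; the diff below shows the actual replacement made in DataSourceMAAS:

    import oauthlib.oauth1

    client = oauthlib.oauth1.Client(
        'consumer-key',
        client_secret='consumer-secret',
        resource_owner_key='token-key',
        resource_owner_secret='token-secret',
        signature_method=oauthlib.oauth1.SIGNATURE_PLAINTEXT)
    uri, signed_headers, body = client.sign('http://example.invalid/MAAS/')
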
--- cloudinit/sources/DataSourceMAAS.py | 56 ++++++++++++++++------------ requirements.txt | 2 +- tests/unittests/test_datasource/test_maas.py | 7 +--- 3 files changed, 35 insertions(+), 30 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 8f9c81de..39296f08 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -22,10 +22,11 @@ from __future__ import print_function from email.utils import parsedate import errno -import oauth.oauth as oauth +import oauthlib import os import time -import urllib2 + +from six.moves.urllib_request import Request, urlopen from cloudinit import log as logging from cloudinit import sources @@ -274,25 +275,34 @@ def check_seed_contents(content, seed): def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, timestamp=None): - consumer = oauth.OAuthConsumer(consumer_key, consumer_secret) - token = oauth.OAuthToken(token_key, token_secret) - - if timestamp is None: - ts = int(time.time()) - else: - ts = timestamp - - params = { - 'oauth_version': "1.0", - 'oauth_nonce': oauth.generate_nonce(), - 'oauth_timestamp': ts, - 'oauth_token': token.key, - 'oauth_consumer_key': consumer.key, - } - req = oauth.OAuthRequest(http_url=url, parameters=params) - req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(), - consumer, token) - return req.to_header() + client = oauthlib.oauth1.Client( + consumer_key, + client_secret=consumer_secret, + resource_owner_key=token_key, + resource_owner_secret=token_secret, + signature_method=oauthlib.SIGNATURE_PLAINTEXT) + uri, signed_headers, body = client.sign(url) + return signed_headers + + ## consumer = oauth.OAuthConsumer(consumer_key, consumer_secret) + ## token = oauth.OAuthToken(token_key, token_secret) + + ## if timestamp is None: + ## ts = int(time.time()) + ## else: + ## ts = timestamp + + ## params = { + ## 'oauth_version': "1.0", + ## 'oauth_nonce': oauth.generate_nonce(), + ## 'oauth_timestamp': ts, + ## 'oauth_token': token.key, + ## 'oauth_consumer_key': consumer.key, + ## } + ## req = oauth.OAuthRequest(http_url=url, parameters=params) + ## req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(), + ## consumer, token) + ## return req.to_header() class MAASSeedDirNone(Exception): @@ -359,8 +369,8 @@ if __name__ == "__main__": creds[key] = cfg[key] def geturl(url, headers_cb): - req = urllib2.Request(url, data=None, headers=headers_cb(url)) - return (urllib2.urlopen(req).read()) + req = Request(url, data=None, headers=headers_cb(url)) + return urlopen(req).read() def printurl(url, headers_cb): print("== %s ==\n%s\n" % (url, geturl(url, headers_cb))) diff --git a/requirements.txt b/requirements.txt index 2a12ca3e..19c88857 100644 --- a/requirements.txt +++ b/requirements.txt @@ -8,7 +8,7 @@ PrettyTable # This one is currently only used by the MAAS datasource. If that # datasource is removed, this is no longer needed -oauth +oauthlib # This one is currently used only by the CloudSigma and SmartOS datasources. 
# If these datasources are removed, this is no longer needed diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index 66fe22ae..6af0cd82 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -4,11 +4,7 @@ import shutil import tempfile import unittest -# XXX DataSourceMAAS must be ported to oauthlib for Python 3 -import six -if not six.PY3: - from cloudinit.sources import DataSourceMAAS - +from cloudinit.sources import DataSourceMAAS from cloudinit import url_helper from ..helpers import populate_dir @@ -18,7 +14,6 @@ except ImportError: import mock -@unittest.skipIf(six.PY3, 'DataSourceMAAS must be ported to oauthlib') class TestMAASDataSource(unittest.TestCase): def setUp(self): -- cgit v1.2.3 From c3ced2d4bdbbbdcb2466202e1571d4ea7bfc7c72 Mon Sep 17 00:00:00 2001 From: Barry Warsaw Date: Tue, 27 Jan 2015 14:36:10 -0500 Subject: Remove a comment turd. --- cloudinit/sources/DataSourceMAAS.py | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 39296f08..082cc58f 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -284,26 +284,6 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, uri, signed_headers, body = client.sign(url) return signed_headers - ## consumer = oauth.OAuthConsumer(consumer_key, consumer_secret) - ## token = oauth.OAuthToken(token_key, token_secret) - - ## if timestamp is None: - ## ts = int(time.time()) - ## else: - ## ts = timestamp - - ## params = { - ## 'oauth_version': "1.0", - ## 'oauth_nonce': oauth.generate_nonce(), - ## 'oauth_timestamp': ts, - ## 'oauth_token': token.key, - ## 'oauth_consumer_key': consumer.key, - ## } - ## req = oauth.OAuthRequest(http_url=url, parameters=params) - ## req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(), - ## consumer, token) - ## return req.to_header() - class MAASSeedDirNone(Exception): pass -- cgit v1.2.3 From 69c64029997599b3f1764ef48fe571094e2ee5f2 Mon Sep 17 00:00:00 2001 From: Barry Warsaw Date: Tue, 27 Jan 2015 14:40:05 -0500 Subject: Respond to review: - Just use util.load_file() instead of yet another way to open and read the file. --- cloudinit/sources/DataSourceOpenNebula.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 6da569ec..a0275cda 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -25,7 +25,6 @@ # along with this program. If not, see . 
import base64 -import codecs import os import pwd import re @@ -35,7 +34,6 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import util -import six LOG = logging.getLogger(__name__) @@ -46,12 +44,6 @@ CONTEXT_DISK_FILES = ["context.sh"] VALID_DSMODES = ("local", "net", "disabled") -def utf8_open(path): - if six.PY3: - return open(path, 'r', encoding='utf-8') - return codecs.open(path, 'r', encoding='utf-8') - - class DataSourceOpenNebula(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -389,9 +381,7 @@ def read_context_disk_dir(source_dir, asuser=None): "does not exist", asuser) try: path = os.path.join(source_dir, 'context.sh') - with utf8_open(path) as f: - content = f.read().strip() - + content = util.load_file(path) context = parse_shell_config(content, asuser=asuser) except util.ProcessExecutionError as e: raise BrokenContextDiskDir("Error processing context.sh: %s" % (e)) -- cgit v1.2.3 From 6e742d20e9ed56498925c7c850cd5da65d063b4b Mon Sep 17 00:00:00 2001 From: Barry Warsaw Date: Tue, 27 Jan 2015 15:03:52 -0500 Subject: Respond to review: - Refactor both the base64 encoding and decoding into utility functions. Also: - Mechanically fix some other broken untested code. --- cloudinit/config/cc_seed_random.py | 8 +------ cloudinit/config/cc_ssh_authkey_fingerprints.py | 2 +- cloudinit/sources/DataSourceOpenNebula.py | 7 +----- cloudinit/sources/DataSourceSmartOS.py | 11 +-------- cloudinit/util.py | 20 ++++++++++++++++ tests/unittests/test_datasource/test_azure.py | 28 ++++++++-------------- tests/unittests/test_datasource/test_opennebula.py | 11 ++------- tests/unittests/test_datasource/test_smartos.py | 14 ++++------- .../test_handler/test_handler_seed_random.py | 12 ++-------- 9 files changed, 42 insertions(+), 71 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index 981e1b08..bb64b0f5 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -38,13 +38,7 @@ def _decode(data, encoding=None): if not encoding or encoding.lower() in ['raw']: return data elif encoding.lower() in ['base64', 'b64']: - # Try to give us a native string in both Python 2 and 3, and remember - # that b64decode() returns bytes in Python 3. 
- decoded = base64.b64decode(data) - try: - return decoded.decode('utf-8') - except UnicodeDecodeError: - return decoded + return util.b64d(data) elif encoding.lower() in ['gzip', 'gz']: return util.decomp_gzip(data, quiet=False) else: diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 51580633..6ce831bc 100644 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -32,7 +32,7 @@ from cloudinit import util def _split_hash(bin_hash): split_up = [] - for i in xrange(0, len(bin_hash), 2): + for i in range(0, len(bin_hash), 2): split_up.append(bin_hash[i:i + 2]) return split_up diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index a0275cda..61709c1b 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -426,12 +426,7 @@ def read_context_disk_dir(source_dir, asuser=None): context.get('USER_DATA_ENCODING')) if encoding == "base64": try: - userdata = base64.b64decode(results['userdata']) - # In Python 3 we still expect a str, but b64decode will return - # bytes. Convert to str. - if isinstance(userdata, bytes): - userdata = userdata.decode('utf-8') - results['userdata'] = userdata + results['userdata'] = util.b64d(results['userdata']) except TypeError: LOG.warn("Failed base64 decoding of userdata") diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index f59ad3d6..9d48beab 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -351,16 +351,7 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None, if b64: try: - # Generally, we want native strings in the values. Python 3's - # b64decode will return bytes though, so decode them to utf-8 if - # possible. If that fails, return the bytes. - decoded = base64.b64decode(resp) - try: - if isinstance(decoded, bytes): - return decoded.decode('utf-8') - except UnicodeDecodeError: - pass - return decoded + return util.b64d(resp) # Bogus input produces different errors in Python 2 and 3; catch both. except (TypeError, binascii.Error): LOG.warn("Failed base64 decoding key '%s'", noun) diff --git a/cloudinit/util.py b/cloudinit/util.py index 766f8e32..8916cc11 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -44,6 +44,7 @@ import sys import tempfile import time +from base64 import b64decode, b64encode from six.moves.urllib import parse as urlparse import six @@ -90,6 +91,25 @@ def encode_text(text, encoding='utf-8'): return text return text.encode(encoding) + +def b64d(source): + # Base64 decode some data, accepting bytes or unicode/str, and returning + # str/unicode if the result is utf-8 compatible, otherwise returning bytes. + decoded = b64decode(source) + if isinstance(decoded, bytes): + try: + return decoded.decode('utf-8') + except UnicodeDecodeError: + return decoded + +def b64e(source): + # Base64 encode some data, accepting bytes or unicode/str, and returning + # str/unicode if the result is utf-8 compatible, otherwise returning bytes. 
+ if not isinstance(source, bytes): + source = source.encode('utf-8') + return b64encode(source).decode('utf-8') + + # Path for DMI Data DMI_SYS_PATH = "/sys/class/dmi/id" diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 97a53bee..965bce4b 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1,5 +1,5 @@ from cloudinit import helpers -from cloudinit.util import load_file +from cloudinit.util import b64e, load_file from cloudinit.sources import DataSourceAzure from ..helpers import TestCase, populate_dir @@ -12,7 +12,6 @@ try: except ImportError: from contextlib2 import ExitStack -import base64 import crypt import os import stat @@ -22,13 +21,6 @@ import tempfile import unittest -def b64(source): - # In Python 3, b64encode only accepts bytes and returns bytes. - if not isinstance(source, bytes): - source = source.encode('utf-8') - return base64.b64encode(source).decode('us-ascii') - - def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): if data is None: data = {'HostName': 'FOOHOST'} @@ -58,7 +50,7 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): content += "<%s%s>%s\n" % (key, attrs, val, key) if userdata: - content += "%s\n" % (b64(userdata)) + content += "%s\n" % (b64e(userdata)) if pubkeys: content += "\n" @@ -189,7 +181,7 @@ class TestAzureDataSource(TestCase): # set dscfg in via base64 encoded yaml cfg = {'agent_command': "my_command"} odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': b64(yaml.dump(cfg)), + 'dscfg': {'text': b64e(yaml.dump(cfg)), 'encoding': 'base64'}} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} @@ -241,7 +233,7 @@ class TestAzureDataSource(TestCase): def test_userdata_found(self): mydata = "FOOBAR" - odata = {'UserData': b64(mydata)} + odata = {'UserData': b64e(mydata)} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) @@ -289,7 +281,7 @@ class TestAzureDataSource(TestCase): 'command': 'my-bounce-command', 'hostname_command': 'my-hostname-command'}} odata = {'HostName': "xhost", - 'dscfg': {'text': b64(yaml.dump(cfg)), + 'dscfg': {'text': b64e(yaml.dump(cfg)), 'encoding': 'base64'}} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} self._get_ds(data).get_data() @@ -304,7 +296,7 @@ class TestAzureDataSource(TestCase): # config specifying set_hostname off should not bounce cfg = {'set_hostname': False} odata = {'HostName': "xhost", - 'dscfg': {'text': b64(yaml.dump(cfg)), + 'dscfg': {'text': b64e(yaml.dump(cfg)), 'encoding': 'base64'}} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} self._get_ds(data).get_data() @@ -333,7 +325,7 @@ class TestAzureDataSource(TestCase): # Make sure that user can affect disk aliases dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}} odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': b64(yaml.dump(dscfg)), + 'dscfg': {'text': b64e(yaml.dump(dscfg)), 'encoding': 'base64'}} usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'}, 'ephemeral0': False}} @@ -370,7 +362,7 @@ class TestAzureDataSource(TestCase): def test_existing_ovf_same(self): # waagent/SharedConfig left alone if found ovf-env.xml same as cached - odata = {'UserData': b64("SOMEUSERDATA")} + odata = {'UserData': b64e("SOMEUSERDATA")} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} populate_dir(self.waagent_d, @@ -394,9 +386,9 @@ class TestAzureDataSource(TestCase): # 'get_data' should 
remove SharedConfig.xml in /var/lib/waagent # if ovf-env.xml differs. cached_ovfenv = construct_valid_ovf_env( - {'userdata': b64("FOO_USERDATA")}) + {'userdata': b64e("FOO_USERDATA")}) new_ovfenv = construct_valid_ovf_env( - {'userdata': b64("NEW_USERDATA")}) + {'userdata': b64e("NEW_USERDATA")}) populate_dir(self.waagent_d, {'ovf-env.xml': cached_ovfenv, diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index e5a4bd18..27adf21b 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -3,19 +3,12 @@ from cloudinit.sources import DataSourceOpenNebula as ds from cloudinit import util from ..helpers import TestCase, populate_dir -from base64 import b64encode import os import pwd import shutil import tempfile import unittest -def b64(source): - # In Python 3, b64encode only accepts bytes and returns bytes. - if not isinstance(source, bytes): - source = source.encode('utf-8') - return b64encode(source).decode('us-ascii') - TEST_VARS = { 'VAR1': 'single', @@ -186,7 +179,7 @@ class TestOpenNebulaDataSource(TestCase): self.assertEqual(USER_DATA, results['userdata']) def test_user_data_encoding_required_for_decode(self): - b64userdata = b64(USER_DATA) + b64userdata = util.b64e(USER_DATA) for k in ('USER_DATA', 'USERDATA'): my_d = os.path.join(self.tmp, k) populate_context_dir(my_d, {k: b64userdata}) @@ -198,7 +191,7 @@ class TestOpenNebulaDataSource(TestCase): def test_user_data_base64_encoding(self): for k in ('USER_DATA', 'USERDATA'): my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: b64(USER_DATA), + populate_context_dir(my_d, {k: util.b64e(USER_DATA), 'USERDATA_ENCODING': 'base64'}) results = ds.read_context_disk_dir(my_d) diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index b5ebf94d..8b62b1b1 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -24,9 +24,9 @@ from __future__ import print_function -import base64 from cloudinit import helpers as c_helpers from cloudinit.sources import DataSourceSmartOS +from cloudinit.util import b64e from .. import helpers import os import os.path @@ -36,12 +36,6 @@ import tempfile import stat import uuid -def b64(source): - # In Python 3, b64encode only accepts bytes and returns bytes. 
- if not isinstance(source, bytes): - source = source.encode('utf-8') - return base64.b64encode(source).decode('us-ascii') - MOCK_RETURNS = { 'hostname': 'test-host', @@ -239,7 +233,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): my_returns = MOCK_RETURNS.copy() my_returns['base64_all'] = "true" for k in ('hostname', 'cloud-init:user-data'): - my_returns[k] = b64(my_returns[k]) + my_returns[k] = b64e(my_returns[k]) dsrc = self._get_ds(mockdata=my_returns) ret = dsrc.get_data() @@ -260,7 +254,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): my_returns['b64-cloud-init:user-data'] = "true" my_returns['b64-hostname'] = "true" for k in ('hostname', 'cloud-init:user-data'): - my_returns[k] = b64(my_returns[k]) + my_returns[k] = b64e(my_returns[k]) dsrc = self._get_ds(mockdata=my_returns) ret = dsrc.get_data() @@ -276,7 +270,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): my_returns = MOCK_RETURNS.copy() my_returns['base64_keys'] = 'hostname,ignored' for k in ('hostname',): - my_returns[k] = b64(my_returns[k]) + my_returns[k] = b64e(my_returns[k]) dsrc = self._get_ds(mockdata=my_returns) ret = dsrc.get_data() diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py index d3f18fa0..0bcdcb31 100644 --- a/tests/unittests/test_handler/test_handler_seed_random.py +++ b/tests/unittests/test_handler/test_handler_seed_random.py @@ -18,7 +18,6 @@ from cloudinit.config import cc_seed_random -import base64 import gzip import tempfile @@ -38,13 +37,6 @@ import logging LOG = logging.getLogger(__name__) -def b64(source): - # In Python 3, b64encode only accepts bytes and returns bytes. - if not isinstance(source, bytes): - source = source.encode('utf-8') - return base64.b64encode(source).decode('us-ascii') - - class TestRandomSeed(t_help.TestCase): def setUp(self): super(TestRandomSeed, self).setUp() @@ -141,7 +133,7 @@ class TestRandomSeed(t_help.TestCase): self.assertEquals("big-toe", contents) def test_append_random_base64(self): - data = b64('bubbles') + data = util.b64e('bubbles') cfg = { 'random_seed': { 'file': self._seed_file, @@ -154,7 +146,7 @@ class TestRandomSeed(t_help.TestCase): self.assertEquals("bubbles", contents) def test_append_random_b64(self): - data = b64('kit-kat') + data = util.b64e('kit-kat') cfg = { 'random_seed': { 'file': self._seed_file, -- cgit v1.2.3 From f62b86bd45c8df78ada32ab4040a639c9d096202 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 11 Feb 2015 01:09:34 +0000 Subject: fix random_seed module --- cloudinit/config/cc_seed_random.py | 16 ++++++++-------- cloudinit/sources/DataSourceAzure.py | 3 ++- cloudinit/util.py | 16 +++++++++------- 3 files changed, 19 insertions(+), 16 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py index bb64b0f5..3288a853 100644 --- a/cloudinit/config/cc_seed_random.py +++ b/cloudinit/config/cc_seed_random.py @@ -22,7 +22,7 @@ import base64 import os -from six import StringIO +from six import BytesIO from cloudinit.settings import PER_INSTANCE from cloudinit import log as logging @@ -34,13 +34,13 @@ LOG = logging.getLogger(__name__) def _decode(data, encoding=None): if not data: - return '' + return b'' if not encoding or encoding.lower() in ['raw']: - return data + return util.encode_text(data) elif encoding.lower() in ['base64', 'b64']: - return util.b64d(data) + return base64.b64decode(data) elif encoding.lower() in ['gzip', 
'gz']: - return util.decomp_gzip(data, quiet=False) + return util.decomp_gzip(data, quiet=False, decode=None) else: raise IOError("Unknown random_seed encoding: %s" % (encoding)) @@ -65,9 +65,9 @@ def handle_random_seed_command(command, required, env=None): def handle(name, cfg, cloud, log, _args): mycfg = cfg.get('random_seed', {}) seed_path = mycfg.get('file', '/dev/urandom') - seed_data = mycfg.get('data', '') + seed_data = mycfg.get('data', b'') - seed_buf = StringIO() + seed_buf = BytesIO() if seed_data: seed_buf.write(_decode(seed_data, encoding=mycfg.get('encoding'))) @@ -75,7 +75,7 @@ def handle(name, cfg, cloud, log, _args): # openstack meta_data.json metadata = cloud.datasource.metadata if metadata and 'random_seed' in metadata: - seed_buf.write(metadata['random_seed']) + seed_buf.write(util.encode_text(metadata['random_seed'])) seed_data = seed_buf.getvalue() if len(seed_data): diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 29ae2c22..c599d50f 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -124,7 +124,8 @@ class DataSourceAzureNet(sources.DataSource): LOG.debug("using files cached in %s", ddir) # azure / hyper-v provides random data here - seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True) + seed = util.load_file("/sys/firmware/acpi/tables/OEM0", + quiet=True, decode=False) if seed: self.metadata['random_seed'] = seed diff --git a/cloudinit/util.py b/cloudinit/util.py index 3a921afe..c998154a 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -96,11 +96,10 @@ def b64d(source): # Base64 decode some data, accepting bytes or unicode/str, and returning # str/unicode if the result is utf-8 compatible, otherwise returning bytes. decoded = b64decode(source) - if isinstance(decoded, bytes): - try: - return decoded.decode('utf-8') - except UnicodeDecodeError: - return decoded + try: + return decoded.decode('utf-8') + except UnicodeDecodeError: + return decoded def b64e(source): # Base64 encode some data, accepting bytes or unicode/str, and returning @@ -354,11 +353,14 @@ def clean_filename(fn): return fn -def decomp_gzip(data, quiet=True): +def decomp_gzip(data, quiet=True, decode=True): try: buf = six.BytesIO(encode_text(data)) with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh: - return decode_binary(gh.read()) + if decode: + return decode_binary(gh.read()) + else: + return gh.read() except Exception as e: if quiet: return data -- cgit v1.2.3 From 10aeda45b32645542d03cd42bd830558a6354495 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 17 Feb 2015 16:33:23 +0000 Subject: Clean up imports in DataSourceCloudStack.py. 
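The reordering below follows the usual stdlib-first grouping: standard library imports in one block, cloud-init's own modules in another. Condensed from the diff that follows, the resulting layout is roughly:

    # standard library first
    import os
    import time
    from socket import inet_ntoa
    from struct import pack

    # then cloud-init's own modules
    from cloudinit import ec2_utils as ec2
    from cloudinit import log as logging
    from cloudinit import url_helper as uhelp
    from cloudinit import sources, util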
--- cloudinit/sources/DataSourceCloudStack.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 1bbeca59..b8974dc1 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -26,14 +26,13 @@ import os import time +from socket import inet_ntoa +from struct import pack from cloudinit import ec2_utils as ec2 from cloudinit import log as logging -from cloudinit import sources from cloudinit import url_helper as uhelp -from cloudinit import util -from socket import inet_ntoa -from struct import pack +from cloudinit import sources, util LOG = logging.getLogger(__name__) -- cgit v1.2.3 From e626359a6ea47880f0c17add03502513ee3a6792 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 17 Feb 2015 16:33:23 +0000 Subject: Fetch and use passwords from CloudStack virtual router. --- cloudinit/sources/DataSourceCloudStack.py | 36 ++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index b8974dc1..0377d940 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -29,6 +29,8 @@ import time from socket import inet_ntoa from struct import pack +from six.moves import http_client + from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import url_helper as uhelp @@ -44,10 +46,11 @@ class DataSourceCloudStack(sources.DataSource): # Cloudstack has its metadata/userdata URLs located at # http:///latest/ self.api_ver = 'latest' - vr_addr = get_vr_address() - if not vr_addr: + self.vr_addr = get_vr_address() + if not self.vr_addr: raise RuntimeError("No virtual router found!") - self.metadata_address = "http://%s/" % (vr_addr) + self.metadata_address = "http://%s/" % (self.vr_addr,) + self.cfg = {} def _get_url_settings(self): mcfg = self.ds_cfg @@ -92,6 +95,9 @@ class DataSourceCloudStack(sources.DataSource): return bool(url) + def get_config_obj(self): + return self.cfg + def get_data(self): seed_ret = {} if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")): @@ -109,12 +115,36 @@ class DataSourceCloudStack(sources.DataSource): self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", int(time.time() - start_time)) + set_password = self.get_password() + if set_password: + self.cfg = { + 'ssh_pwauth': True, + 'password': set_password, + 'chpasswd': { + 'expire': False, + }, + } return True except Exception: util.logexc(LOG, 'Failed fetching from metadata service %s', self.metadata_address) return False + def get_password(self): + def _do_request(req_string): + conn = http_client.HTTPConnection(self.vr_addr, 8080) + conn.request('GET', '', headers={'DomU_Request': req_string}) + output = conn.sock.recv(1024).decode('utf-8').strip() + conn.close() + return output + password = _do_request('send_my_password') + if password in ['', 'saved_password']: + return None + if password == 'bad_request': + raise RuntimeError('Error when attempting to fetch root password.') + _do_request('saved_password') + return password + def get_instance_id(self): return self.metadata['instance-id'] -- cgit v1.2.3 From e01795dac74cd31bd6054e3185c2dba6203690ca Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 17 Feb 2015 16:33:23 +0000 Subject: Add explanatory 
comment. --- cloudinit/sources/DataSourceCloudStack.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 0377d940..5eda10a5 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -132,6 +132,9 @@ class DataSourceCloudStack(sources.DataSource): def get_password(self): def _do_request(req_string): + # We have to provide a valid HTTP request, but a valid HTTP + # response is not returned. This means that getresponse() chokes, + # so we use the socket directly to read off the password. conn = http_client.HTTPConnection(self.vr_addr, 8080) conn.request('GET', '', headers={'DomU_Request': req_string}) output = conn.sock.recv(1024).decode('utf-8').strip() -- cgit v1.2.3 From 589ced475c9e200d4645f0b06f7846dae412b194 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 18 Feb 2015 13:30:51 +0000 Subject: Read ovf-env.xml as bytes. This should fix the Azure data source on Python 3, and is appropriate as XML shouldn't really be read as a string. --- cloudinit/sources/DataSourceAzure.py | 4 ++-- tests/unittests/helpers.py | 5 +++-- tests/unittests/test_datasource/test_azure.py | 6 ++++++ 3 files changed, 11 insertions(+), 4 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 444070bb..6e030217 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -137,7 +137,7 @@ class DataSourceAzureNet(sources.DataSource): if found != ddir: cached_ovfenv = util.load_file( - os.path.join(ddir, 'ovf-env.xml'), quiet=True) + os.path.join(ddir, 'ovf-env.xml'), quiet=True, decode=False) if cached_ovfenv != files['ovf-env.xml']: # source was not walinux-agent's datadir, so we have to clean # up so 'wait_for_files' doesn't return early due to stale data @@ -593,7 +593,7 @@ def load_azure_ds_dir(source_dir): if not os.path.isfile(ovf_file): raise NonAzureDataSource("No ovf-env file found") - with open(ovf_file, "r") as fp: + with open(ovf_file, "rb") as fp: contents = fp.read() md, ud, cfg = read_azure_ovf(contents) diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index ce77af93..7516bd02 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -287,10 +287,11 @@ def populate_dir(path, files): if not os.path.exists(path): os.makedirs(path) for (name, content) in files.items(): - with open(os.path.join(path, name), "w") as fp: - fp.write(content) + with open(os.path.join(path, name), "wb") as fp: + fp.write(content.encode('utf-8')) fp.close() + try: skipIf = unittest.skipIf except AttributeError: diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 965bce4b..38d70fcd 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -360,6 +360,12 @@ class TestAzureDataSource(TestCase): self.assertTrue(os.path.exists(ovf_env_path)) self.assertEqual(xml, load_file(ovf_env_path)) + def test_ovf_can_include_unicode(self): + xml = construct_valid_ovf_env(data={}) + xml = u'\ufeff{0}'.format(xml) + dsrc = self._get_ds({'ovfcontent': xml}) + dsrc.get_data() + def test_existing_ovf_same(self): # waagent/SharedConfig left alone if found ovf-env.xml same as cached odata = {'UserData': b64e("SOMEUSERDATA")} -- cgit v1.2.3 From 5e864eb373ead67d2bc29a19d970f9d3d94c53df Mon Sep 17 
00:00:00 2001 From: Daniel Watkins Date: Wed, 18 Feb 2015 18:09:34 +0000 Subject: Failing to fetch a CloudStack password should never fail the whole DS. There might be some CloudStack deployments without the :8080 password server, and there's no reason the rest of the data source can't be used for them. --- cloudinit/sources/DataSourceCloudStack.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 5eda10a5..a8f8daec 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -115,15 +115,21 @@ class DataSourceCloudStack(sources.DataSource): self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", int(time.time() - start_time)) - set_password = self.get_password() - if set_password: - self.cfg = { - 'ssh_pwauth': True, - 'password': set_password, - 'chpasswd': { - 'expire': False, - }, - } + try: + set_password = self.get_password() + except Exception: + util.logexc(LOG, + 'Failed to fetch password from virtual router %s', + self.vr_addr) + else: + if set_password: + self.cfg = { + 'ssh_pwauth': True, + 'password': set_password, + 'chpasswd': { + 'expire': False, + }, + } return True except Exception: util.logexc(LOG, 'Failed fetching from metadata service %s', -- cgit v1.2.3 From d3d44a3efaf22c91d342f2cb81470745b7be0658 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 18 Feb 2015 18:10:15 +0000 Subject: Set an explicit timeout when fetching CloudStack passwords. --- cloudinit/sources/DataSourceCloudStack.py | 1 + 1 file changed, 1 insertion(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index a8f8daec..89f58e1e 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -143,6 +143,7 @@ class DataSourceCloudStack(sources.DataSource): # so we use the socket directly to read off the password. conn = http_client.HTTPConnection(self.vr_addr, 8080) conn.request('GET', '', headers={'DomU_Request': req_string}) + conn.sock.settimeout(30) output = conn.sock.recv(1024).decode('utf-8').strip() conn.close() return output -- cgit v1.2.3 From b57c6a109491f344fa6e6fc2593ab2e60ca65249 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 20 Feb 2015 10:57:06 +0000 Subject: Minor formatting clean-up in CloudStack DS. 
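The changes below adjust whitespace only: over-indented continuation lines are pulled back to conventional hanging indents, with no behavioural change. One pair from the hunks that follow, shown side by side:

    # before: continuation aligned far past the opening parenthesis
    self.userdata_raw = ec2.get_instance_userdata(self.api_ver,
                                                  self.metadata_address)

    # after: a plain hanging indent
    self.userdata_raw = ec2.get_instance_userdata(
        self.api_ver, self.metadata_address)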
--- cloudinit/sources/DataSourceCloudStack.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 89f58e1e..85f20c23 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -84,14 +84,14 @@ class DataSourceCloudStack(sources.DataSource): 'latest/meta-data/instance-id')] start_time = time.time() url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, - timeout=timeout, status_cb=LOG.warn) + timeout=timeout, status_cb=LOG.warn) if url: LOG.debug("Using metadata source: '%s'", url) else: LOG.critical(("Giving up on waiting for the metadata from %s" " after %s seconds"), - urls, int(time.time() - start_time)) + urls, int(time.time() - start_time)) return bool(url) @@ -109,8 +109,8 @@ class DataSourceCloudStack(sources.DataSource): if not self.wait_for_metadata_service(): return False start_time = time.time() - self.userdata_raw = ec2.get_instance_userdata(self.api_ver, - self.metadata_address) + self.userdata_raw = ec2.get_instance_userdata( + self.api_ver, self.metadata_address) self.metadata = ec2.get_instance_metadata(self.api_ver, self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", @@ -231,7 +231,7 @@ def get_vr_address(): # Used to match classes to dependencies datasources = [ - (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), ] -- cgit v1.2.3 From f8d9ebbe3743bcada75bc1a980b49f493e2da2f1 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 20 Feb 2015 10:57:18 +0000 Subject: Split CloudStack password handling out to separate class. --- cloudinit/sources/DataSourceCloudStack.py | 65 +++++++++++++++++++++---------- 1 file changed, 45 insertions(+), 20 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 85f20c23..0c3c51c0 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -39,6 +39,49 @@ from cloudinit import sources, util LOG = logging.getLogger(__name__) +class CloudStackPasswordServerClient(object): + """ + Implements password fetching from the CloudStack password server. + + http://cloudstack-administration.readthedocs.org/en/latest/templates.html#adding-password-management-to-your-templates + has documentation about the system. This implementation is following that + found at + https://github.com/shankerbalan/cloudstack-scripts/blob/master/cloud-set-guest-password-debian + + The CloudStack password server is, essentially, a broken HTTP + server. It requires us to provide a valid HTTP request (including a + DomU_Request header, which is the meat of the request), but just + writes the text of its response on to the socket, without a status + line or any HTTP headers. This makes HTTP libraries sad, which + explains the screwiness of the implementation of this class. + """ + + def __init__(self, virtual_router_address): + self.virtual_router_address = virtual_router_address + + def _do_request(self, domu_request): + # We have to provide a valid HTTP request, but a valid HTTP + # response is not returned. This means that getresponse() chokes, + # so we use the socket directly to read off the response. + # Because we're reading off the socket directly, we can't re-use the + # connection. 
+ conn = http_client.HTTPConnection(self.virtual_router_address, 8080) + conn.request('GET', '', headers={'DomU_Request': domu_request}) + conn.sock.settimeout(30) + output = conn.sock.recv(1024).decode('utf-8').strip() + conn.close() + return output + + def get_password(self): + password = self._do_request('send_my_password') + if password in ['', 'saved_password']: + return None + if password == 'bad_request': + raise RuntimeError('Error when attempting to fetch root password.') + self._do_request('saved_password') + return password + + class DataSourceCloudStack(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -115,8 +158,9 @@ class DataSourceCloudStack(sources.DataSource): self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", int(time.time() - start_time)) + password_client = CloudStackPasswordServerClient(self.vr_addr) try: - set_password = self.get_password() + set_password = password_client.get_password() except Exception: util.logexc(LOG, 'Failed to fetch password from virtual router %s', @@ -136,25 +180,6 @@ class DataSourceCloudStack(sources.DataSource): self.metadata_address) return False - def get_password(self): - def _do_request(req_string): - # We have to provide a valid HTTP request, but a valid HTTP - # response is not returned. This means that getresponse() chokes, - # so we use the socket directly to read off the password. - conn = http_client.HTTPConnection(self.vr_addr, 8080) - conn.request('GET', '', headers={'DomU_Request': req_string}) - conn.sock.settimeout(30) - output = conn.sock.recv(1024).decode('utf-8').strip() - conn.close() - return output - password = _do_request('send_my_password') - if password in ['', 'saved_password']: - return None - if password == 'bad_request': - raise RuntimeError('Error when attempting to fetch root password.') - _do_request('saved_password') - return password - def get_instance_id(self): return self.metadata['instance-id'] -- cgit v1.2.3 From ef84bd214a1d5e0b922c0dd38096f694f8ff406e Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 23 Feb 2015 09:22:50 +0000 Subject: Always close the password server connection, even on failure. --- cloudinit/sources/DataSourceCloudStack.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 0c3c51c0..996076b1 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -66,10 +66,12 @@ class CloudStackPasswordServerClient(object): # Because we're reading off the socket directly, we can't re-use the # connection. conn = http_client.HTTPConnection(self.virtual_router_address, 8080) - conn.request('GET', '', headers={'DomU_Request': domu_request}) - conn.sock.settimeout(30) - output = conn.sock.recv(1024).decode('utf-8').strip() - conn.close() + try: + conn.request('GET', '', headers={'DomU_Request': domu_request}) + conn.sock.settimeout(30) + output = conn.sock.recv(1024).decode('utf-8').strip() + finally: + conn.close() return output def get_password(self): -- cgit v1.2.3 From 9ab6bbab42ffb5cadbe0afb36aa6967ed94459c3 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 23 Feb 2015 09:36:36 +0000 Subject: Add documentation about upstream CloudStack HTTP fix. 
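The docstring added below records why the client reads the socket directly: the password server answers with a bare payload, no status line and no headers, so http_client.getresponse() cannot parse it. Stripped of the class plumbing, the exchange boils down to something like this sketch (the address is a placeholder, not a real deployment value):

    from six.moves import http_client

    vr_addr = '10.0.0.1'  # hypothetical virtual router address
    conn = http_client.HTTPConnection(vr_addr, 8080)
    try:
        # the request side is ordinary HTTP...
        conn.request('GET', '', headers={'DomU_Request': 'send_my_password'})
        conn.sock.settimeout(30)
        # ...but the reply is not, so read the raw socket instead of
        # calling conn.getresponse()
        output = conn.sock.recv(1024).decode('utf-8').strip()
    finally:
        conn.close()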
--- cloudinit/sources/DataSourceCloudStack.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 996076b1..7b32e1fa 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -54,6 +54,9 @@ class CloudStackPasswordServerClient(object): writes the text of its response on to the socket, without a status line or any HTTP headers. This makes HTTP libraries sad, which explains the screwiness of the implementation of this class. + + This should be fixed in CloudStack by commit + a72f14ea9cb832faaac946b3cf9f56856b50142a in December 2014. """ def __init__(self, virtual_router_address): -- cgit v1.2.3 From 8cd5d7b143f882d80d45b1c04bdde1949846d4f1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 25 Feb 2015 19:40:33 -0500 Subject: move towards user-data being binary UrlResponse: biggest change... make readurl return bytes, making user know what to do with it. util: add load_tfile_or_url for loading text file or url as read_file_or_url now returns bytes ec2_utils: all meta-data is text, remove non-obvious string translations DigitalOcean: adjust for ec2_utils DataSourceGCE, DataSourceMAAS: user-data is binary other fields are text. openstack.py: read paths without decoding to text. This is ok as paths other than user-data are json, and load_json will handle load_file still returns text, and that is what most things use. --- cloudinit/ec2_utils.py | 14 +++++++++++--- cloudinit/sources/DataSourceDigitalOcean.py | 8 ++++++-- cloudinit/sources/DataSourceGCE.py | 21 ++++++++++++--------- cloudinit/sources/DataSourceMAAS.py | 14 +++++++++++--- cloudinit/sources/helpers/openstack.py | 2 +- cloudinit/url_helper.py | 2 +- cloudinit/util.py | 11 ++++++++--- tests/unittests/helpers.py | 5 ++++- tests/unittests/test_datasource/test_configdrive.py | 15 ++++++++++----- tests/unittests/test_datasource/test_gce.py | 2 +- tests/unittests/test_datasource/test_maas.py | 8 ++++---- tests/unittests/test_datasource/test_nocloud.py | 14 +++++++------- tests/unittests/test_datasource/test_openstack.py | 6 +++--- tests/unittests/test_ec2_util.py | 2 +- .../test_handler/test_handler_apt_configure.py | 12 ++++++------ tests/unittests/test_pathprefix2dict.py | 10 +++++----- 16 files changed, 91 insertions(+), 55 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index e1ed4091..7cf99186 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -41,6 +41,10 @@ class MetadataLeafDecoder(object): def __call__(self, field, blob): if not blob: return blob + try: + blob = util.decode_binary(blob) + except UnicodeDecodeError: + return blob if self._maybe_json_object(blob): try: # Assume it's json, unless it fails parsing... 
@@ -69,6 +73,8 @@ class MetadataMaterializer(object): def _parse(self, blob): leaves = {} children = [] + blob = util.decode_binary(blob) + if not blob: return (leaves, children) @@ -117,12 +123,12 @@ class MetadataMaterializer(object): child_url = url_helper.combine_url(base_url, c) if not child_url.endswith("/"): child_url += "/" - child_blob = str(self._caller(child_url)) + child_blob = self._caller(child_url) child_contents[c] = self._materialize(child_blob, child_url) leaf_contents = {} for (field, resource) in leaves.items(): leaf_url = url_helper.combine_url(base_url, resource) - leaf_blob = self._caller(leaf_url).contents + leaf_blob = self._caller(leaf_url) leaf_contents[field] = self._leaf_decoder(field, leaf_blob) joined = {} joined.update(child_contents) @@ -179,11 +185,13 @@ def get_instance_metadata(api_version='latest', caller = functools.partial(util.read_file_or_url, ssl_details=ssl_details, timeout=timeout, retries=retries) + def mcaller(url): + return caller(url).contents try: response = caller(md_url) materializer = MetadataMaterializer(response.contents, - md_url, caller, + md_url, mcaller, leaf_decoder=leaf_decoder) md = materializer.materialize() if not isinstance(md, (dict)): diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py index 76ddaa9d..5d47564d 100644 --- a/cloudinit/sources/DataSourceDigitalOcean.py +++ b/cloudinit/sources/DataSourceDigitalOcean.py @@ -54,9 +54,13 @@ class DataSourceDigitalOcean(sources.DataSource): def get_data(self): caller = functools.partial(util.read_file_or_url, timeout=self.timeout, retries=self.retries) - md = ec2_utils.MetadataMaterializer(str(caller(self.metadata_address)), + + def mcaller(url): + return caller(url).contents + + md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address), base_url=self.metadata_address, - caller=caller) + caller=mcaller) self.metadata = md.materialize() diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 6936c74e..608c07f1 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -53,15 +53,15 @@ class DataSourceGCE(sources.DataSource): # GCE metadata server requires a custom header since v1 headers = {'X-Google-Metadata-Request': True} - # url_map: (our-key, path, required) + # url_map: (our-key, path, required, is_text) url_map = [ - ('instance-id', 'instance/id', True), - ('availability-zone', 'instance/zone', True), - ('local-hostname', 'instance/hostname', True), - ('public-keys', 'project/attributes/sshKeys', False), - ('user-data', 'instance/attributes/user-data', False), + ('instance-id', 'instance/id', True, True), + ('availability-zone', 'instance/zone', True, True), + ('local-hostname', 'instance/hostname', True, True), + ('public-keys', 'project/attributes/sshKeys', False, True), + ('user-data', 'instance/attributes/user-data', False, False), ('user-data-encoding', 'instance/attributes/user-data-encoding', - False), + False, True), ] # if we cannot resolve the metadata server, then no point in trying @@ -71,13 +71,16 @@ class DataSourceGCE(sources.DataSource): # iterate over url_map keys to get metadata items found = False - for (mkey, path, required) in url_map: + for (mkey, path, required, is_text) in url_map: try: resp = url_helper.readurl(url=self.metadata_address + path, headers=headers) if resp.code == 200: found = True - self.metadata[mkey] = resp.contents + if is_text: + self.metadata[mkey] = util.decode_binary(resp.contents) + else: + 
self.metadata[mkey] = resp.contents else: if required: msg = "required url %s returned code %s. not GCE" diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 082cc58f..35c5b5e1 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -36,6 +36,8 @@ from cloudinit import util LOG = logging.getLogger(__name__) MD_VERSION = "2012-03-01" +BINARY_FIELDS = ('user-data',) + class DataSourceMAAS(sources.DataSource): """ @@ -185,7 +187,9 @@ def read_maas_seed_dir(seed_d): md = {} for fname in files: try: - md[fname] = util.load_file(os.path.join(seed_d, fname)) + print("fname: %s / %s" % (fname, fname not in BINARY_FIELDS)) + md[fname] = util.load_file(os.path.join(seed_d, fname), + decode=fname not in BINARY_FIELDS) except IOError as e: if e.errno != errno.ENOENT: raise @@ -218,6 +222,7 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, 'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'), 'user-data': "%s/%s" % (base_url, 'user-data'), } + md = {} for name in file_order: url = files.get(name) @@ -238,7 +243,10 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, timeout=timeout, ssl_details=ssl_details) if resp.ok(): - md[name] = str(resp) + if name in BINARY_FIELDS: + md[name] = resp.contents + else: + md[name] = util.decode_binary(resp.contents) else: LOG.warn(("Fetching from %s resulted in" " an invalid http code %s"), url, resp.code) @@ -263,7 +271,7 @@ def check_seed_contents(content, seed): if len(missing): raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing)) - userdata = content.get('user-data', "") + userdata = content.get('user-data', b"") md = {} for (key, val) in content.items(): if key == 'user-data': diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index 88c7a198..bd93d22f 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -327,7 +327,7 @@ class ConfigDriveReader(BaseReader): return os.path.join(*components) def _path_read(self, path): - return util.load_file(path) + return util.load_file(path, decode=False) def _fetch_available_versions(self): if self._versions is None: diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 62001dff..2d81a062 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -119,7 +119,7 @@ class UrlResponse(object): @property def contents(self): - return self._response.text + return self._response.content @property def url(self): diff --git a/cloudinit/util.py b/cloudinit/util.py index 4fbdf0a9..efbc3c8d 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -739,6 +739,10 @@ def fetch_ssl_details(paths=None): return ssl_details +def load_tfile_or_url(*args, **kwargs): + return(decode_binary(read_file_or_url(*args, **kwargs).contents)) + + def read_file_or_url(url, timeout=5, retries=10, headers=None, data=None, sec_between=1, ssl_details=None, headers_cb=None, exception_cb=None): @@ -750,7 +754,7 @@ def read_file_or_url(url, timeout=5, retries=10, LOG.warn("Unable to post data to file resource %s", url) file_path = url[len("file://"):] try: - contents = load_file(file_path) + contents = load_file(file_path, decode=False) except IOError as e: code = e.errno if e.errno == errno.ENOENT: @@ -806,7 +810,7 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): ud_url = "%s%s%s" % (base, "user-data", ext) md_url = "%s%s%s" % (base, "meta-data", ext) - md_resp = read_file_or_url(md_url, timeout, 
retries, file_retries) + md_resp = load_tfile_or_url(md_url, timeout, retries, file_retries) md = None if md_resp.ok(): md = load_yaml(md_resp.contents, default={}) @@ -815,6 +819,7 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0): ud = None if ud_resp.ok(): ud = ud_resp.contents + print("returning %s (%s)" % (ud_resp.contents.__class__, ud_resp.contents)) return (md, ud) @@ -2030,7 +2035,7 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep): ret = {} for f in required + optional: try: - ret[f] = load_file(base + delim + f, quiet=False) + ret[f] = load_file(base + delim + f, quiet=False, decode=False) except IOError as e: if e.errno != errno.ENOENT: raise diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py index 7516bd02..24e1e881 100644 --- a/tests/unittests/helpers.py +++ b/tests/unittests/helpers.py @@ -288,7 +288,10 @@ def populate_dir(path, files): os.makedirs(path) for (name, content) in files.items(): with open(os.path.join(path, name), "wb") as fp: - fp.write(content.encode('utf-8')) + if isinstance(content, six.binary_type): + fp.write(content) + else: + fp.write(content.encode('utf-8')) fp.close() diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index e28bdd84..83aca505 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -2,6 +2,7 @@ from copy import copy import json import os import shutil +import six import tempfile try: @@ -45,7 +46,7 @@ EC2_META = { 'reservation-id': 'r-iru5qm4m', 'security-groups': ['default'] } -USER_DATA = '#!/bin/sh\necho This is user data\n' +USER_DATA = b'#!/bin/sh\necho This is user data\n' OSTACK_META = { 'availability_zone': 'nova', 'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'}, @@ -56,8 +57,8 @@ OSTACK_META = { 'public_keys': {'mykey': PUBKEY}, 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'} -CONTENT_0 = 'This is contents of /etc/foo.cfg\n' -CONTENT_1 = '# this is /etc/bar/bar.cfg\n' +CONTENT_0 = b'This is contents of /etc/foo.cfg\n' +CONTENT_1 = b'# this is /etc/bar/bar.cfg\n' CFG_DRIVE_FILES_V2 = { 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META), @@ -346,8 +347,12 @@ def populate_dir(seed_dir, files): dirname = os.path.dirname(path) if not os.path.isdir(dirname): os.makedirs(dirname) - with open(path, "w") as fp: + if isinstance(content, six.text_type): + mode = "w" + else: + mode = "wb" + + with open(path, mode) as fp: fp.write(content) - fp.close() # vi: ts=4 expandtab diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 6dd4b5ed..d28f3b08 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -32,7 +32,7 @@ GCE_META = { 'instance/zone': 'foo/bar', 'project/attributes/sshKeys': 'user:ssh-rsa AA2..+aRD0fyVw== root@server', 'instance/hostname': 'server.project-foo.local', - 'instance/attributes/user-data': '/bin/echo foo\n', + 'instance/attributes/user-data': b'/bin/echo foo\n', } GCE_META_PARTIAL = { diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index d25e1adc..f109bb04 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -26,7 +26,7 @@ class TestMAASDataSource(TestCase): data = {'instance-id': 'i-valid01', 'local-hostname': 'valid01-hostname', - 'user-data': 'valid01-userdata', + 
'user-data': b'valid01-userdata', 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'} my_d = os.path.join(self.tmp, "valid") @@ -46,7 +46,7 @@ class TestMAASDataSource(TestCase): data = {'instance-id': 'i-valid-extra', 'local-hostname': 'valid-extra-hostname', - 'user-data': 'valid-extra-userdata', 'foo': 'bar'} + 'user-data': b'valid-extra-userdata', 'foo': 'bar'} my_d = os.path.join(self.tmp, "valid_extra") populate_dir(my_d, data) @@ -103,7 +103,7 @@ class TestMAASDataSource(TestCase): 'meta-data/instance-id': 'i-instanceid', 'meta-data/local-hostname': 'test-hostname', 'meta-data/public-keys': 'test-hostname', - 'user-data': 'foodata', + 'user-data': b'foodata', } valid_order = [ 'meta-data/local-hostname', @@ -143,7 +143,7 @@ class TestMAASDataSource(TestCase): userdata, metadata = DataSourceMAAS.read_maas_seed_url( my_seed, header_cb=my_headers_cb, version=my_ver) - self.assertEqual("foodata", userdata) + self.assertEqual(b"foodata", userdata) self.assertEqual(metadata['instance-id'], valid['meta-data/instance-id']) self.assertEqual(metadata['local-hostname'], diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 4f967f58..85b4c25a 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -37,7 +37,7 @@ class TestNoCloudDataSource(TestCase): def test_nocloud_seed_dir(self): md = {'instance-id': 'IID', 'dsmode': 'local'} - ud = "USER_DATA_HERE" + ud = b"USER_DATA_HERE" populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), {'user-data': ud, 'meta-data': yaml.safe_dump(md)}) @@ -92,20 +92,20 @@ class TestNoCloudDataSource(TestCase): data = { 'fs_label': None, 'meta-data': yaml.safe_dump({'instance-id': 'IID'}), - 'user-data': "USER_DATA_RAW", + 'user-data': b"USER_DATA_RAW", } sys_cfg = {'datasource': {'NoCloud': data}} dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() - self.assertEqual(dsrc.userdata_raw, "USER_DATA_RAW") + self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW") self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') self.assertTrue(ret) def test_nocloud_seed_with_vendordata(self): md = {'instance-id': 'IID', 'dsmode': 'local'} - ud = "USER_DATA_HERE" - vd = "THIS IS MY VENDOR_DATA" + ud = b"USER_DATA_HERE" + vd = b"THIS IS MY VENDOR_DATA" populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), {'user-data': ud, 'meta-data': yaml.safe_dump(md), @@ -126,7 +126,7 @@ class TestNoCloudDataSource(TestCase): def test_nocloud_no_vendordata(self): populate_dir(os.path.join(self.paths.seed_dir, "nocloud"), - {'user-data': "ud", 'meta-data': "instance-id: IID\n"}) + {'user-data': b"ud", 'meta-data': "instance-id: IID\n"}) sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} @@ -134,7 +134,7 @@ class TestNoCloudDataSource(TestCase): dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths) ret = dsrc.get_data() - self.assertEqual(dsrc.userdata_raw, "ud") + self.assertEqual(dsrc.userdata_raw, b"ud") self.assertFalse(dsrc.vendordata) self.assertTrue(ret) diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py index 81ef1546..81411ced 100644 --- a/tests/unittests/test_datasource/test_openstack.py +++ b/tests/unittests/test_datasource/test_openstack.py @@ -49,7 +49,7 @@ EC2_META = { 'public-ipv4': '0.0.0.1', 'reservation-id': 'r-iru5qm4m', } -USER_DATA = '#!/bin/sh\necho This is user data\n' +USER_DATA = b'#!/bin/sh\necho This is user data\n' VENDOR_DATA = { 
'magic': '', } @@ -63,8 +63,8 @@ OSTACK_META = { 'public_keys': {'mykey': PUBKEY}, 'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c', } -CONTENT_0 = 'This is contents of /etc/foo.cfg\n' -CONTENT_1 = '# this is /etc/bar/bar.cfg\n' +CONTENT_0 = b'This is contents of /etc/foo.cfg\n' +CONTENT_1 = b'# this is /etc/bar/bar.cfg\n' OS_FILES = { 'openstack/latest/meta_data.json': json.dumps(OSTACK_META), 'openstack/latest/user_data': USER_DATA, diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py index 84aa002e..bd43accf 100644 --- a/tests/unittests/test_ec2_util.py +++ b/tests/unittests/test_ec2_util.py @@ -16,7 +16,7 @@ class TestEc2Util(helpers.HttprettyTestCase): body='stuff', status=200) userdata = eu.get_instance_userdata(self.VERSION) - self.assertEquals('stuff', userdata) + self.assertEquals('stuff', userdata.decode('utf-8')) @hp.activate def test_userdata_fetch_fail_not_found(self): diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py index d8fe9a4f..02cad8b2 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure.py +++ b/tests/unittests/test_handler/test_handler_apt_configure.py @@ -30,7 +30,7 @@ class TestAptProxyConfig(TestCase): self.assertTrue(os.path.isfile(self.pfile)) self.assertFalse(os.path.isfile(self.cfile)) - contents = str(util.read_file_or_url(self.pfile)) + contents = util.load_tfile_or_url(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) def test_apt_http_proxy_written(self): @@ -40,7 +40,7 @@ class TestAptProxyConfig(TestCase): self.assertTrue(os.path.isfile(self.pfile)) self.assertFalse(os.path.isfile(self.cfile)) - contents = str(util.read_file_or_url(self.pfile)) + contents = util.load_tfile_or_url(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "myproxy")) def test_apt_all_proxy_written(self): @@ -58,7 +58,7 @@ class TestAptProxyConfig(TestCase): self.assertTrue(os.path.isfile(self.pfile)) self.assertFalse(os.path.isfile(self.cfile)) - contents = str(util.read_file_or_url(self.pfile)) + contents = util.load_tfile_or_url(self.pfile) for ptype, pval in values.items(): self.assertTrue(self._search_apt_config(contents, ptype, pval)) @@ -74,7 +74,7 @@ class TestAptProxyConfig(TestCase): cc_apt_configure.apply_apt_config({'apt_proxy': "foo"}, self.pfile, self.cfile) self.assertTrue(os.path.isfile(self.pfile)) - contents = str(util.read_file_or_url(self.pfile)) + contents = util.load_tfile_or_url(self.pfile) self.assertTrue(self._search_apt_config(contents, "http", "foo")) def test_config_written(self): @@ -86,14 +86,14 @@ class TestAptProxyConfig(TestCase): self.assertTrue(os.path.isfile(self.cfile)) self.assertFalse(os.path.isfile(self.pfile)) - self.assertEqual(str(util.read_file_or_url(self.cfile)), payload) + self.assertEqual(util.load_tfile_or_url(self.cfile), payload) def test_config_replaced(self): util.write_file(self.pfile, "content doesnt matter") cc_apt_configure.apply_apt_config({'apt_config': "foo"}, self.pfile, self.cfile) self.assertTrue(os.path.isfile(self.cfile)) - self.assertEqual(str(util.read_file_or_url(self.cfile)), "foo") + self.assertEqual(util.load_tfile_or_url(self.cfile), "foo") def test_config_deleted(self): # if no 'apt_config' is provided, delete any previously written file diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py index 7089bde6..38fd75b6 100644 --- a/tests/unittests/test_pathprefix2dict.py +++ 
b/tests/unittests/test_pathprefix2dict.py @@ -14,28 +14,28 @@ class TestPathPrefix2Dict(TestCase): self.addCleanup(shutil.rmtree, self.tmp) def test_required_only(self): - dirdata = {'f1': 'f1content', 'f2': 'f2content'} + dirdata = {'f1': b'f1content', 'f2': b'f2content'} populate_dir(self.tmp, dirdata) ret = util.pathprefix2dict(self.tmp, required=['f1', 'f2']) self.assertEqual(dirdata, ret) def test_required_missing(self): - dirdata = {'f1': 'f1content'} + dirdata = {'f1': b'f1content'} populate_dir(self.tmp, dirdata) kwargs = {'required': ['f1', 'f2']} self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs) def test_no_required_and_optional(self): - dirdata = {'f1': 'f1c', 'f2': 'f2c'} + dirdata = {'f1': b'f1c', 'f2': b'f2c'} populate_dir(self.tmp, dirdata) ret = util.pathprefix2dict(self.tmp, required=None, - optional=['f1', 'f2']) + optional=['f1', 'f2']) self.assertEqual(dirdata, ret) def test_required_and_optional(self): - dirdata = {'f1': 'f1c', 'f2': 'f2c'} + dirdata = {'f1': b'f1c', 'f2': b'f2c'} populate_dir(self.tmp, dirdata) ret = util.pathprefix2dict(self.tmp, required=['f1'], optional=['f2']) -- cgit v1.2.3 From 72958f9c40f53c634d1eb7ef55547271e1972d2c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 2 Mar 2015 16:34:46 -0500 Subject: DataSourceMAAS: fix oauthlib imports In both python2 and python3, This throws "'module' object has no attribute 'oauth1'" $ python3 -c 'import oauthlib; oauthlib.oauth1.Client("x")' While this works fine: $ python3 -c 'import oauthlib.oauth1 as oauth1; oauth1.Client("x")' --- cloudinit/sources/DataSourceMAAS.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 35c5b5e1..6cc010b7 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -22,7 +22,7 @@ from __future__ import print_function from email.utils import parsedate import errno -import oauthlib +import oauthlib.oauth1 as oauth1 import os import time @@ -283,12 +283,12 @@ def check_seed_contents(content, seed): def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, timestamp=None): - client = oauthlib.oauth1.Client( + client = oauth1.Client( consumer_key, client_secret=consumer_secret, resource_owner_key=token_key, resource_owner_secret=token_secret, - signature_method=oauthlib.SIGNATURE_PLAINTEXT) + signature_method=oauth1.SIGNATURE_PLAINTEXT) uri, signed_headers, body = client.sign(url) return signed_headers -- cgit v1.2.3 From 014468ea3fb36e81a3e5a6fc593ce91571c1495f Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 4 Mar 2015 17:20:22 +0000 Subject: Fix invalid format string in CloudSigma logging. 
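For reference, the broken and fixed lines side by side, copied from the diff below. str.format() builds the message eagerly whether or not debug logging is enabled, and the logging module expects %-style placeholders anyway; passing the value as an argument lets logging defer the interpolation until a handler actually emits the record:

    # before: brace-style template, formatted eagerly by str.format()
    LOG.debug("detected hypervisor as {}".format(sys_product_name))

    # after: %-style placeholder, interpolated lazily by logging
    LOG.debug("detected hypervisor as %s", sys_product_name)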
--- cloudinit/sources/DataSourceCloudSigma.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index 76597116..f8f94759 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -59,7 +59,7 @@ class DataSourceCloudSigma(sources.DataSource): LOG.warn("failed to get hypervisor product name via dmi data") return False else: - LOG.debug("detected hypervisor as {}".format(sys_product_name)) + LOG.debug("detected hypervisor as %s", sys_product_name) return 'cloudsigma' in sys_product_name.lower() LOG.warn("failed to query dmi data for system product name") -- cgit v1.2.3 From 692078e75a3f8af92a0151ad30b6a4ecc64b4b35 Mon Sep 17 00:00:00 2001 From: Oleg Strikov Date: Thu, 5 Mar 2015 20:26:10 +0300 Subject: DataSourceMAAS: generate oauth headers with adjusted timestamp in case of clock skew This functionality has been introduced to fix LP: #978127, but was lost while migrating cloud-init to python3. --- cloudinit/sources/DataSourceMAAS.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 6cc010b7..9f9cf3ab 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -288,7 +288,8 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, client_secret=consumer_secret, resource_owner_key=token_key, resource_owner_secret=token_secret, - signature_method=oauth1.SIGNATURE_PLAINTEXT) + signature_method=oauth1.SIGNATURE_PLAINTEXT, + timestamp=timestamp) uri, signed_headers, body = client.sign(url) return signed_headers -- cgit v1.2.3 From ec23db8b0450c8f76305295bea5ec3178dd5f176 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 10 Mar 2015 16:18:20 -0400 Subject: DataSourceMAAS: remove debug statement --- cloudinit/sources/DataSourceMAAS.py | 1 - 1 file changed, 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 9f9cf3ab..53f097e6 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -187,7 +187,6 @@ def read_maas_seed_dir(seed_d): md = {} for fname in files: try: - print("fname: %s / %s" % (fname, fname not in BINARY_FIELDS)) md[fname] = util.load_file(os.path.join(seed_d, fname), decode=fname not in BINARY_FIELDS) except IOError as e: -- cgit v1.2.3 From 5f2b73c8ae292cf400b811f3b3f808be6019a60c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 10 Mar 2015 17:04:59 -0400 Subject: DataSourceMAAS: fix timestamp error in oauthlib oddly enough, the timestamp you pass into oauthlib must be a None or a string. If not, raises ValueError: Only unicode objects are escapable. 
Got 1426021488 of type <type 'int'>
---
 cloudinit/sources/DataSourceMAAS.py | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 53f097e6..c1a0eb61 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -282,6 +282,11 @@ def check_seed_contents(content, seed):
 
 def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
                   timestamp=None):
+    if timestamp:
+        timestamp = str(timestamp)
+    else:
+        timestamp = None
+
     client = oauth1.Client(
         consumer_key,
         client_secret=consumer_secret,
-- cgit v1.2.3

From c8a7b446de26c6bc19df1b8bb7d2b39cb9487749 Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Fri, 13 Mar 2015 10:18:12 +0000
Subject: Write and read bytes to/from the SmartOS serial console.

---
 cloudinit/sources/DataSourceSmartOS.py | 5 +++--
 tests/unittests/test_datasource/test_smartos.py | 15 ++++++++++-----
 2 files changed, 13 insertions(+), 7 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 9d48beab..896fde3f 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -319,7 +319,8 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
         return False
 
     ser = get_serial(seed_device, seed_timeout)
-    ser.write("GET %s\n" % noun.rstrip())
+    request_line = "GET %s\n" % noun.rstrip()
+    ser.write(request_line.encode('ascii'))
     status = str(ser.readline()).rstrip()
     response = []
     eom_found = False
@@ -329,7 +330,7 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
             return default
 
     while not eom_found:
-        m = ser.readline()
+        m = ser.readline().decode('ascii')
         if m.rstrip() == ".":
             eom_found = True
         else:
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 8b62b1b1..cb0ab984 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -36,6 +36,8 @@ import tempfile
 import stat
 import uuid
 
+import six
+
 
 MOCK_RETURNS = {
     'hostname': 'test-host',
@@ -78,24 +80,27 @@ class MockSerial(object):
         return True
 
     def write(self, line):
-        line = line.replace('GET ', '')
+        if not isinstance(line, six.binary_type):
+            raise TypeError("Should be writing binary lines.")
+        line = line.decode('ascii').replace('GET ', '')
         self.last = line.rstrip()
 
     def readline(self):
         if self.new:
             self.new = False
             if self.last in self.mockdata:
-                return 'SUCCESS\n'
+                line = 'SUCCESS\n'
             else:
-                return 'NOTFOUND %s\n' % self.last
+                line = 'NOTFOUND %s\n' % self.last
 
-        if self.last in self.mockdata:
+        elif self.last in self.mockdata:
             if not self.mocked_out:
                 self.mocked_out = [x for x in self._format_out()]
 
             if len(self.mocked_out) > self.count:
                 self.count += 1
-                return self.mocked_out[self.count - 1]
+                line = self.mocked_out[self.count - 1]
+        return line.encode('ascii')
 
     def _format_out(self):
         if self.last in self.mockdata:
-- cgit v1.2.3

From 7c63a4096d9b6c9dc10605c289ee048c7b0778c6 Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Wed, 25 Mar 2015 15:54:07 +0000
Subject: Convert DataSourceSmartOS to use v2 metadata.
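The V2 protocol implemented below frames every request as 'V2 <length> <crc32> <request-id> GET <base64-key>\n', where the length and checksum cover the text that follows them. A minimal sketch of assembling one frame, reusing the same CRC scheme as the new client (the request id is fixed here purely for illustration; the real client picks a random one):

    import binascii
    from base64 import b64encode

    def checksum(body):
        # CRC32 of the frame body, masked to an unsigned 8-digit hex string
        return '{0:08x}'.format(binascii.crc32(body.encode('utf-8')) & 0xffffffff)

    request_id = '{0:08x}'.format(0xabcdef12)
    body = '{0} GET {1}'.format(request_id,
                                b64encode(b'hostname').decode('ascii'))
    frame = 'V2 {0} {1} {2}\n'.format(len(body), checksum(body), body)
    # frame is now 'V2 25 <8 hex digits> abcdef12 GET aG9zdG5hbWU='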
--- cloudinit/sources/DataSourceSmartOS.py | 75 +++++--- tests/unittests/test_datasource/test_smartos.py | 216 ++++++++++++++++++++---- 2 files changed, 239 insertions(+), 52 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 896fde3f..694a011a 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -29,9 +29,10 @@ # http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html # Comments with "@datadictionary" are snippets of the definition -import base64 import binascii import os +import random +import re import serial from cloudinit import log as logging @@ -301,6 +302,53 @@ def get_serial(seed_device, seed_timeout): return ser +class JoyentMetadataFetchException(Exception): + pass + + +class JoyentMetadataClient(object): + + def __init__(self, serial): + self.serial = serial + + def _checksum(self, body): + return '{0:08x}'.format( + binascii.crc32(body.encode('utf-8')) & 0xffffffff) + + def _get_value_from_frame(self, expected_request_id, frame): + regex = ( + r'V2 (?P\d+) (?P[0-9a-f]+)' + r' (?P(?P[0-9a-f]+) (?PSUCCESS|NOTFOUND)' + r'( (?P.+))?)') + frame_data = re.match(regex, frame).groupdict() + if int(frame_data['length']) != len(frame_data['body']): + raise JoyentMetadataFetchException( + 'Incorrect frame length given ({0} != {1}).'.format( + frame_data['length'], len(frame_data['body']))) + expected_checksum = self._checksum(frame_data['body']) + if frame_data['checksum'] != expected_checksum: + raise JoyentMetadataFetchException( + 'Invalid checksum (expected: {0}; got {1}).'.format( + expected_checksum, frame_data['checksum'])) + if frame_data['request_id'] != expected_request_id: + raise JoyentMetadataFetchException( + 'Request ID mismatch (expected: {0}; got {1}).'.format( + expected_request_id, frame_data['request_id'])) + if not frame_data.get('payload', None): + return None + return util.b64d(frame_data['payload']) + + def get_metadata(self, metadata_key): + request_id = '{0:08x}'.format(random.randint(0, 0xffffffff)) + message_body = '{0} GET {1}'.format(request_id, + util.b64e(metadata_key)) + msg = 'V2 {0} {1} {2}\n'.format( + len(message_body), self._checksum(message_body), message_body) + self.serial.write(msg.encode('ascii')) + response = self.serial.readline().decode('ascii') + return self._get_value_from_frame(request_id, response) + + def query_data(noun, seed_device, seed_timeout, strip=False, default=None, b64=None): """Makes a request to via the serial console via "GET " @@ -314,34 +362,21 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None, encoded, so this method relies on being told if the data is base64 or not. 
""" - if not noun: return False ser = get_serial(seed_device, seed_timeout) - request_line = "GET %s\n" % noun.rstrip() - ser.write(request_line.encode('ascii')) - status = str(ser.readline()).rstrip() - response = [] - eom_found = False - - if 'SUCCESS' not in status: - ser.close() - return default - - while not eom_found: - m = ser.readline().decode('ascii') - if m.rstrip() == ".": - eom_found = True - else: - response.append(m) + client = JoyentMetadataClient(ser) + response = client.get_metadata(noun) ser.close() + if response is None: + return default if b64 is None: b64 = query_data('b64-%s' % noun, seed_device=seed_device, - seed_timeout=seed_timeout, b64=False, - default=False, strip=True) + seed_timeout=seed_timeout, b64=False, + default=False, strip=True) b64 = util.is_true(b64) resp = None diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index cdd83bf8..c79cf3aa 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -31,15 +31,24 @@ import shutil import stat import tempfile import uuid +from binascii import crc32 + +import serial +import six import six from cloudinit import helpers as c_helpers from cloudinit.sources import DataSourceSmartOS -from cloudinit.util import b64e +from cloudinit.util import b64d, b64e from .. import helpers +try: + from unittest import mock +except ImportError: + import mock + MOCK_RETURNS = { 'hostname': 'test-host', 'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname', @@ -57,6 +66,37 @@ MOCK_RETURNS = { DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc') +def _checksum(body): + return '{0:08x}'.format(crc32(body.encode('utf-8')) & 0xffffffff) + + +def _generate_v2_frame(request_id, command, body=None): + body_parts = [request_id, command] + if body: + body_parts.append(b64e(body)) + message_body = ' '.join(body_parts) + return 'V2 {0} {1} {2}\n'.format( + len(message_body), _checksum(message_body), message_body).encode( + 'ascii') + + +def _parse_v2_frame(line): + line = line.decode('ascii') + if not line.endswith('\n'): + raise Exception('Frames must end with a newline.') + version, length, checksum, body = line.strip().split(' ', 3) + if version != 'V2': + raise Exception('Frames must begin with V2.') + if int(length) != len(body): + raise Exception('Incorrect frame length given ({0} != {1}).'.format( + length, len(body))) + expected_checksum = _checksum(body) + if checksum != expected_checksum: + raise Exception('Invalid checksum.') + request_id, command, payload = body.split() + return request_id, command, b64d(payload) + + class MockSerial(object): """Fake a serial terminal for testing the code that interfaces with the serial""" @@ -81,39 +121,21 @@ class MockSerial(object): return True def write(self, line): - if not isinstance(line, six.binary_type): - raise TypeError("Should be writing binary lines.") - line = line.decode('ascii').replace('GET ', '') - self.last = line.rstrip() + self.last = line def readline(self): - if self.new: - self.new = False - if self.last in self.mockdata: - line = 'SUCCESS\n' - else: - line = 'NOTFOUND %s\n' % self.last - - elif self.last in self.mockdata: - if not self.mocked_out: - self.mocked_out = [x for x in self._format_out()] - - if len(self.mocked_out) > self.count: - self.count += 1 - line = self.mocked_out[self.count - 1] - return line.encode('ascii') - - def _format_out(self): - if self.last in self.mockdata: - _mret = self.mockdata[self.last] - try: - for l in 
_mret.splitlines(): - yield "%s\n" % l.rstrip() - except: - yield "%s\n" % _mret.rstrip() - - yield '.' - yield '\n' + if self.last == '\n': + return 'invalid command\n' + elif self.last == 'NEGOTIATE V2\n': + return 'V2_OK\n' + request_id, command, request_body = _parse_v2_frame(self.last) + if command != 'GET': + raise Exception('MockSerial only supports GET requests.') + metadata_key = request_body.strip() + if metadata_key in self.mockdata: + return _generate_v2_frame( + request_id, 'SUCCESS', self.mockdata[metadata_key]) + return _generate_v2_frame(request_id, 'NOTFOUND') class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): @@ -459,3 +481,133 @@ def apply_patches(patches): setattr(ref, name, replace) ret.append((ref, name, orig)) return ret + + +class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase): + + def setUp(self): + super(TestJoyentMetadataClient, self).setUp() + self.serial = mock.MagicMock(spec=serial.Serial) + self.request_id = 0xabcdef12 + self.metadata_value = 'value' + self.response_parts = { + 'command': 'SUCCESS', + 'crc': 'b5a9ff00', + 'length': 17 + len(b64e(self.metadata_value)), + 'payload': b64e(self.metadata_value), + 'request_id': '{0:08x}'.format(self.request_id), + } + + def make_response(): + payload = '' + if self.response_parts['payload']: + payload = ' {0}'.format(self.response_parts['payload']) + del self.response_parts['payload'] + return ( + 'V2 {length} {crc} {request_id} {command}{payload}\n'.format( + payload=payload, **self.response_parts).encode('ascii')) + self.serial.readline.side_effect = make_response + self.patched_funcs.enter_context( + mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint', + mock.Mock(return_value=self.request_id))) + + def _get_client(self): + return DataSourceSmartOS.JoyentMetadataClient(self.serial) + + def assertEndsWith(self, haystack, prefix): + self.assertTrue(haystack.endswith(prefix), + "{0} does not end with '{1}'".format( + repr(haystack), prefix)) + + def assertStartsWith(self, haystack, prefix): + self.assertTrue(haystack.startswith(prefix), + "{0} does not start with '{1}'".format( + repr(haystack), prefix)) + + def test_get_metadata_writes_a_single_line(self): + client = self._get_client() + client.get_metadata('some_key') + self.assertEqual(1, self.serial.write.call_count) + written_line = self.serial.write.call_args[0][0] + self.assertEndsWith(written_line, b'\n') + self.assertEqual(1, written_line.count(b'\n')) + + def _get_written_line(self, key='some_key'): + client = self._get_client() + client.get_metadata(key) + return self.serial.write.call_args[0][0] + + def test_get_metadata_writes_bytes(self): + self.assertIsInstance(self._get_written_line(), six.binary_type) + + def test_get_metadata_line_starts_with_v2(self): + self.assertStartsWith(self._get_written_line(), b'V2') + + def test_get_metadata_uses_get_command(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + self.assertEqual('GET', parts[4]) + + def test_get_metadata_base64_encodes_argument(self): + key = 'my_key' + parts = self._get_written_line(key).decode('ascii').strip().split(' ') + self.assertEqual(b64e(key), parts[5]) + + def test_get_metadata_calculates_length_correctly(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + expected_length = len(' '.join(parts[3:])) + self.assertEqual(expected_length, int(parts[1])) + + def test_get_metadata_uses_appropriate_request_id(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + request_id 
= parts[3] + self.assertEqual(8, len(request_id)) + self.assertEqual(request_id, request_id.lower()) + + def test_get_metadata_uses_random_number_for_request_id(self): + line = self._get_written_line() + request_id = line.decode('ascii').strip().split(' ')[3] + self.assertEqual('{0:08x}'.format(self.request_id), request_id) + + def test_get_metadata_checksums_correctly(self): + parts = self._get_written_line().decode('ascii').strip().split(' ') + expected_checksum = '{0:08x}'.format( + crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff) + checksum = parts[2] + self.assertEqual(expected_checksum, checksum) + + def test_get_metadata_reads_a_line(self): + client = self._get_client() + client.get_metadata('some_key') + self.assertEqual(1, self.serial.readline.call_count) + + def test_get_metadata_returns_valid_value(self): + client = self._get_client() + value = client.get_metadata('some_key') + self.assertEqual(self.metadata_value, value) + + def test_get_metadata_throws_exception_for_incorrect_length(self): + self.response_parts['length'] = 0 + client = self._get_client() + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client.get_metadata, 'some_key') + + def test_get_metadata_throws_exception_for_incorrect_crc(self): + self.response_parts['crc'] = 'deadbeef' + client = self._get_client() + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client.get_metadata, 'some_key') + + def test_get_metadata_throws_exception_for_request_id_mismatch(self): + self.response_parts['request_id'] = 'deadbeef' + client = self._get_client() + client._checksum = lambda _: self.response_parts['crc'] + self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException, + client.get_metadata, 'some_key') + + def test_get_metadata_returns_None_if_value_not_found(self): + self.response_parts['payload'] = '' + self.response_parts['command'] = 'NOTFOUND' + self.response_parts['length'] = 17 + client = self._get_client() + client._checksum = lambda _: self.response_parts['crc'] + self.assertIsNone(client.get_metadata('some_key')) -- cgit v1.2.3 From 1828ac3fa151ec7ff761b34305ed5fb85a9020d1 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 25 Mar 2015 15:54:14 +0000 Subject: Add logging to JoyentMetadataClient. 
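For reference when reading the new debug lines, the frames being logged follow the framing implemented in the previous commit: a body of "<request_id> GET <base64(key)>" wrapped as "V2 <length> <crc32> <body>\n". A standalone sketch of how such a request frame is built (illustrative only, not part of the patch; build_frame is a hypothetical helper mirroring JoyentMetadataClient.get_metadata, with the stdlib base64 module standing in for cloud-init's util.b64e):

    import base64
    import binascii

    def _checksum(body):
        # Unsigned CRC32 rendered as 8 hex digits, matching
        # JoyentMetadataClient._checksum.
        return '{0:08x}'.format(
            binascii.crc32(body.encode('utf-8')) & 0xffffffff)

    def build_frame(request_id, key):
        # Body is "<request_id> GET <base64(key)>"; the V2 envelope adds
        # the body length and its checksum.
        body = '{0} GET {1}'.format(
            request_id, base64.b64encode(key.encode('utf-8')).decode('ascii'))
        return 'V2 {0} {1} {2}\n'.format(len(body), _checksum(body), body)

    # build_frame('0123abcd', 'hostname')
    # -> 'V2 25 xxxxxxxx 0123abcd GET aG9zdG5hbWU=\n'
    #    (crc shown as xxxxxxxx)

A SUCCESS response uses the same framing, with the value base64-encoded in the payload field.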
--- cloudinit/sources/DataSourceSmartOS.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 694a011a..61dd044f 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -335,17 +335,23 @@ class JoyentMetadataClient(object): 'Request ID mismatch (expected: {0}; got {1}).'.format( expected_request_id, frame_data['request_id'])) if not frame_data.get('payload', None): + LOG.info('No value found.') return None - return util.b64d(frame_data['payload']) + value = util.b64d(frame_data['payload']) + LOG.info('Value "%s" found.', value) + return value def get_metadata(self, metadata_key): + LOG.info('Fetching metadata key "%s"...', metadata_key) request_id = '{0:08x}'.format(random.randint(0, 0xffffffff)) message_body = '{0} GET {1}'.format(request_id, util.b64e(metadata_key)) msg = 'V2 {0} {1} {2}\n'.format( len(message_body), self._checksum(message_body), message_body) + LOG.debug('Writing "%s" to serial port.', msg) self.serial.write(msg.encode('ascii')) response = self.serial.readline().decode('ascii') + LOG.debug('Read "%s" from serial port.', response) return self._get_value_from_frame(request_id, response) -- cgit v1.2.3 From d52feae7ad38670964edebb0eea5db2c8c80f760 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 25 Mar 2015 15:54:19 +0000 Subject: Ensure that the serial console is always closed. --- cloudinit/sources/DataSourceSmartOS.py | 9 +++++---- tests/unittests/test_datasource/test_smartos.py | 12 ++++++++++++ 2 files changed, 17 insertions(+), 4 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 61dd044f..237fc140 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -30,9 +30,11 @@ # Comments with "@datadictionary" are snippets of the definition import binascii +import contextlib import os import random import re + import serial from cloudinit import log as logging @@ -371,11 +373,10 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None, if not noun: return False - ser = get_serial(seed_device, seed_timeout) + with contextlib.closing(get_serial(seed_device, seed_timeout)) as ser: + client = JoyentMetadataClient(ser) + response = client.get_metadata(noun) - client = JoyentMetadataClient(ser) - response = client.get_metadata(noun) - ser.close() if response is None: return default diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 39991cc2..28b41eaf 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -409,6 +409,18 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): self.assertEqual(dsrc.device_name_to_device('FOO'), mydscfg['disk_aliases']['FOO']) + @mock.patch('cloudinit.sources.DataSourceSmartOS.JoyentMetadataClient') + @mock.patch('cloudinit.sources.DataSourceSmartOS.get_serial') + def test_serial_console_closed_on_error(self, get_serial, metadata_client): + class OurException(Exception): + pass + metadata_client.side_effect = OurException + try: + DataSourceSmartOS.query_data('noun', 'device', 0) + except OurException: + pass + self.assertEqual(1, get_serial.return_value.close.call_count) + def apply_patches(patches): ret = [] -- cgit v1.2.3 From f4eb74ccc512d12afbb17dd9c678a5308ca64e9f 
Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 25 Mar 2015 17:26:33 +0000 Subject: Switch logging from info to debug level. --- cloudinit/sources/DataSourceSmartOS.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 237fc140..d299cf26 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -337,14 +337,14 @@ class JoyentMetadataClient(object): 'Request ID mismatch (expected: {0}; got {1}).'.format( expected_request_id, frame_data['request_id'])) if not frame_data.get('payload', None): - LOG.info('No value found.') + LOG.debug('No value found.') return None value = util.b64d(frame_data['payload']) - LOG.info('Value "%s" found.', value) + LOG.debug('Value "%s" found.', value) return value def get_metadata(self, metadata_key): - LOG.info('Fetching metadata key "%s"...', metadata_key) + LOG.debug('Fetching metadata key "%s"...', metadata_key) request_id = '{0:08x}'.format(random.randint(0, 0xffffffff)) message_body = '{0} GET {1}'.format(request_id, util.b64e(metadata_key)) -- cgit v1.2.3 From 5ae131cad02f383c9f3109ad0f51d918787b0196 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 25 Mar 2015 17:27:22 +0000 Subject: Add link to Joyent metadata specification. --- cloudinit/sources/DataSourceSmartOS.py | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index d299cf26..ec2d10ae 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -309,6 +309,12 @@ class JoyentMetadataFetchException(Exception): class JoyentMetadataClient(object): + """ + A client implementing v2 of the Joyent Metadata Protocol Specification. + + The full specification can be found at + http://eng.joyent.com/mdata/protocol.html + """ def __init__(self, serial): self.serial = serial -- cgit v1.2.3 From 5524fd6336a9162aef7687e84705114aa3eb47cd Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 25 Mar 2015 17:59:42 +0000 Subject: Compile SmartOS line-parsing regex once. 
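The pattern itself is unchanged; it just moves to a class attribute so it is compiled once when the class body executes rather than on every frame parsed. (CPython's re module does cache compiled patterns internally, so the practical gain is mostly avoiding the repeated cache lookup and keeping the pattern definition in one place.) A minimal illustration of the idiom, using hypothetical names rather than the real parser:

    import re

    class FrameParser(object):
        # Compiled once at class-definition time; shared by all instances.
        line_regex = re.compile(
            r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+) (?P<body>.*)')

        def parse(self, frame):
            match = self.line_regex.match(frame)
            return match.groupdict() if match else None

    # FrameParser().parse('V2 25 deadbeef 0123abcd GET aG9zdG5hbWU=')
    # -> {'length': '25', 'checksum': 'deadbeef',
    #     'body': '0123abcd GET aG9zdG5hbWU='}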
--- cloudinit/sources/DataSourceSmartOS.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index ec2d10ae..c9b497df 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -315,6 +315,10 @@ class JoyentMetadataClient(object): The full specification can be found at http://eng.joyent.com/mdata/protocol.html """ + line_regex = re.compile( + r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)' + r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<command>SUCCESS|NOTFOUND)' + r'( (?P<payload>.+))?)') def __init__(self, serial): self.serial = serial @@ -324,11 +328,7 @@ class JoyentMetadataClient(object): binascii.crc32(body.encode('utf-8')) & 0xffffffff) def _get_value_from_frame(self, expected_request_id, frame): - regex = ( - r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)' - r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<command>SUCCESS|NOTFOUND)' - r'( (?P<payload>.+))?)') - frame_data = re.match(regex, frame).groupdict() + frame_data = self.line_regex.match(frame).groupdict() if int(frame_data['length']) != len(frame_data['body']): raise JoyentMetadataFetchException( 'Incorrect frame length given ({0} != {1}).'.format( -- cgit v1.2.3 From bf52085a1fa3529329a5c48097a12a6e9b93eb22 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 27 Mar 2015 15:19:51 -0400 Subject: NoCloud: the local portion of NoCloud incorrectly claimed datasources The intent has always been for the local datasource (NoCloud) to require the provider of metadata to provide 'dsmode=local'. If that wasn't found, then the default 'dsmode' would be 'net', and the NoCloudNet datasource would then find the data. The bug here was that the default 'net' wasn't being set when data was found on a local source. --- ChangeLog | 1 + cloudinit/sources/DataSourceNoCloud.py | 5 +++-- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/ChangeLog b/ChangeLog index 32a4f5d6..70ba9ae3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -28,6 +28,7 @@ (LP: #1422388) - readurl, read_file_or_url returns bytes, user must convert as necessary - SmartOS: use v2 metadata service (LP: #1436417) [Daniel Watkins] + - NoCloud: fix local datasource claiming found without explicit dsmode 0.7.6: - open 0.7.6 - Enable vendordata on CloudSigma datasource (LP: #1303986) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index c26a645c..6a861af3 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -124,7 +124,7 @@ class DataSourceNoCloud(sources.DataSource): # that is more likely to be what is desired. If they want # dsmode of local, then they must specify that. if 'dsmode' not in mydata['meta-data']: - mydata['dsmode'] = "net" + mydata['meta-data']['dsmode'] = "net" LOG.debug("Using data from %s", dev) found.append(dev) @@ -193,7 +193,8 @@ class DataSourceNoCloud(sources.DataSource): self.vendordata = mydata['vendor-data'] return True - LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode']) + LOG.debug("%s: not claiming datasource, dsmode=%s", self, + mydata['meta-data']['dsmode']) return False -- cgit v1.2.3 From b6060efa4bd1de7f49f6aca3e97cfe77947f3a93 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 15 Apr 2015 12:13:17 +0100 Subject: Add unit tests for Azure hostname bouncing. Including minor refactoring to make mocking considerably easier.
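The minor refactoring referred to here extracts hostname handling into module-level helpers (get_hostname, set_hostname, perform_hostname_bounce), which the tests in this patch can then replace by name instead of intercepting util.subp calls and inspecting argument lists. A rough sketch of the mechanism (illustrative only, assuming the module is importable as it is in the tests):

    try:
        from unittest import mock
    except ImportError:
        import mock

    from cloudinit.sources import DataSourceAzure

    # Patching the module-level name swaps it out for every caller inside
    # DataSourceAzure: no 'hostname' subprocess runs, canned values are
    # trivial to supply, and calls are easy to assert on.
    with mock.patch.object(DataSourceAzure, 'get_hostname',
                           return_value='unchanged-host-name'):
        assert DataSourceAzure.get_hostname() == 'unchanged-host-name'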
--- cloudinit/sources/DataSourceAzure.py | 28 ++-- tests/unittests/test_datasource/test_azure.py | 186 +++++++++++++++++++------- 2 files changed, 161 insertions(+), 53 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 6e030217..d4211fc4 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -66,6 +66,14 @@ DS_CFG_PATH = ['datasource', DS_NAME] DEF_EPHEMERAL_LABEL = 'Temporary Storage' +def get_hostname(hostname_command='hostname'): + return util.subp(hostname_command, capture=True)[0].strip() + + +def set_hostname(hostname, hostname_command='hostname'): + util.subp([hostname_command, hostname]) + + class DataSourceAzureNet(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -313,13 +321,22 @@ def handle_set_hostname(enabled, hostname, cfg): hostname_command=cfg['hostname_command']) +def perform_hostname_bounce(command, env): + shell = not isinstance(command, (list, tuple)) + # capture=False, see comments in bug 1202758 and bug 1206164. + util.log_time(logfunc=LOG.debug, msg="publishing hostname", + get_uptime=True, func=util.subp, + kwargs={'args': command, 'shell': shell, 'capture': False, + 'env': env}) + + def apply_hostname_bounce(hostname, policy, interface, command, hostname_command="hostname"): # set the hostname to 'hostname' if it is not already set to that. # then, if policy is not off, bounce the interface using command - prev_hostname = util.subp(hostname_command, capture=True)[0].strip() + prev_hostname = get_hostname() - util.subp([hostname_command, hostname]) + set_hostname(hostname, hostname_command) msg = ("phostname=%s hostname=%s policy=%s interface=%s" % (prev_hostname, hostname, policy, interface)) @@ -341,12 +358,7 @@ def apply_hostname_bounce(hostname, policy, interface, command, command = BOUNCE_COMMAND LOG.debug("pubhname: publishing hostname [%s]", msg) - shell = not isinstance(command, (list, tuple)) - # capture=False, see comments in bug 1202758 and bug 1206164. 
- util.log_time(logfunc=LOG.debug, msg="publishing hostname", - get_uptime=True, func=util.subp, - kwargs={'args': command, 'shell': shell, 'capture': False, - 'env': env}) + perform_hostname_bounce(command, env) def crtfile_to_pubkey(fname): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 8112c69b..3adf9bdf 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -116,9 +116,6 @@ class TestAzureDataSource(TestCase): data['iid_from_shared_cfg'] = path return 'i-my-azure-id' - def _apply_hostname_bounce(**kwargs): - data['apply_hostname_bounce'] = kwargs - if data.get('ovfcontent') is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': data['ovfcontent']}) @@ -132,7 +129,9 @@ class TestAzureDataSource(TestCase): (mod, 'wait_for_files', _wait_for_files), (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), (mod, 'iid_from_shared_config', _iid_from_shared_config), - (mod, 'apply_hostname_bounce', _apply_hostname_bounce), + (mod, 'perform_hostname_bounce', mock.MagicMock()), + (mod, 'get_hostname', mock.MagicMock()), + (mod, 'set_hostname', mock.MagicMock()), ]) dsrc = mod.DataSourceAzureNet( @@ -272,47 +271,6 @@ class TestAzureDataSource(TestCase): for mypk in mypklist: self.assertIn(mypk, dsrc.cfg['_pubkeys']) - def test_disabled_bounce(self): - pass - - def test_apply_bounce_call_1(self): - # hostname needs to get through to apply_hostname_bounce - odata = {'HostName': 'my-random-hostname'} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - - self._get_ds(data).get_data() - self.assertIn('hostname', data['apply_hostname_bounce']) - self.assertEqual(data['apply_hostname_bounce']['hostname'], - odata['HostName']) - - def test_apply_bounce_call_configurable(self): - # hostname_bounce should be configurable in datasource cfg - cfg = {'hostname_bounce': {'interface': 'eth1', 'policy': 'off', - 'command': 'my-bounce-command', - 'hostname_command': 'my-hostname-command'}} - odata = {'HostName': "xhost", - 'dscfg': {'text': b64e(yaml.dump(cfg)), - 'encoding': 'base64'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - self._get_ds(data).get_data() - - for k in cfg['hostname_bounce']: - self.assertIn(k, data['apply_hostname_bounce']) - - for k, v in cfg['hostname_bounce'].items(): - self.assertEqual(data['apply_hostname_bounce'][k], v) - - def test_set_hostname_disabled(self): - # config specifying set_hostname off should not bounce - cfg = {'set_hostname': False} - odata = {'HostName': "xhost", - 'dscfg': {'text': b64e(yaml.dump(cfg)), - 'encoding': 'base64'}} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - self._get_ds(data).get_data() - - self.assertEqual(data.get('apply_hostname_bounce', "N/A"), "N/A") - def test_default_ephemeral(self): # make sure the ephemeral device works odata = {} @@ -425,6 +383,144 @@ class TestAzureDataSource(TestCase): load_file(os.path.join(self.waagent_d, 'ovf-env.xml'))) +class TestAzureBounce(TestCase): + + def mock_out_azure_moving_parts(self): + self.patches.enter_context( + mock.patch.object(DataSourceAzure, 'invoke_agent')) + self.patches.enter_context( + mock.patch.object(DataSourceAzure, 'wait_for_files')) + self.patches.enter_context( + mock.patch.object(DataSourceAzure, 'iid_from_shared_config', + mock.MagicMock(return_value='i-my-azure-id'))) + self.patches.enter_context( + mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs', + 
mock.MagicMock(return_value=[]))) + self.patches.enter_context( + mock.patch.object(DataSourceAzure, 'find_ephemeral_disk', + mock.MagicMock(return_value=None))) + self.patches.enter_context( + mock.patch.object(DataSourceAzure, 'find_ephemeral_part', + mock.MagicMock(return_value=None))) + + def setUp(self): + super(TestAzureBounce, self).setUp() + self.tmp = tempfile.mkdtemp() + self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent') + self.paths = helpers.Paths({'cloud_dir': self.tmp}) + self.addCleanup(shutil.rmtree, self.tmp) + DataSourceAzure.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + self.patches = ExitStack() + self.mock_out_azure_moving_parts() + self.get_hostname = self.patches.enter_context( + mock.patch.object(DataSourceAzure, 'get_hostname')) + self.set_hostname = self.patches.enter_context( + mock.patch.object(DataSourceAzure, 'set_hostname')) + self.subp = self.patches.enter_context( + mock.patch('cloudinit.sources.DataSourceAzure.util.subp')) + + def tearDown(self): + self.patches.close() + + def _get_ds(self, ovfcontent=None): + if ovfcontent is not None: + populate_dir(os.path.join(self.paths.seed_dir, "azure"), + {'ovf-env.xml': ovfcontent}) + return DataSourceAzure.DataSourceAzureNet( + {}, distro=None, paths=self.paths) + + def get_ovf_env_with_dscfg(self, hostname, cfg): + odata = { + 'HostName': hostname, + 'dscfg': { + 'text': b64e(yaml.dump(cfg)), + 'encoding': 'base64' + } + } + return construct_valid_ovf_env(data=odata) + + @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') + def test_disabled_bounce_does_not_perform_bounce( + self, perform_hostname_bounce): + cfg = {'hostname_bounce': {'policy': 'off'}} + self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data() + self.assertEqual(0, perform_hostname_bounce.call_count) + + @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') + def test_unchanged_hostname_does_not_perform_bounce( + self, perform_hostname_bounce): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'yes'}} + self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data() + self.assertEqual(0, perform_hostname_bounce.call_count) + + @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') + def test_force_performs_bounce_regardless(self, perform_hostname_bounce): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'force'}} + self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data() + self.assertEqual(1, perform_hostname_bounce.call_count) + + def test_different_hostnames_sets_hostname(self): + expected_hostname = 'azure-expected-host-name' + self.get_hostname.return_value = 'default-host-name' + self._get_ds( + self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data() + self.assertEqual(expected_hostname, + self.set_hostname.call_args_list[0][0][0]) + + @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') + def test_different_hostnames_performs_bounce( + self, perform_hostname_bounce): + expected_hostname = 'azure-expected-host-name' + self.get_hostname.return_value = 'default-host-name' + self._get_ds( + self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data() + self.assertEqual(1, perform_hostname_bounce.call_count) + + def test_environment_correct_for_bounce_command(self): + interface = 'int0' + hostname = 'my-new-host' + old_hostname = 'my-old-host' + 
self.get_hostname.return_value = old_hostname + cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}} + data = self.get_ovf_env_with_dscfg(hostname, cfg) + self._get_ds(data).get_data() + self.assertEqual(1, self.subp.call_count) + bounce_env = self.subp.call_args[1]['env'] + self.assertEqual(interface, bounce_env['interface']) + self.assertEqual(hostname, bounce_env['hostname']) + self.assertEqual(old_hostname, bounce_env['old_hostname']) + + def test_default_bounce_command_used_by_default(self): + cmd = 'default-bounce-command' + DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd + cfg = {'hostname_bounce': {'policy': 'force'}} + data = self.get_ovf_env_with_dscfg('some-hostname', cfg) + self._get_ds(data).get_data() + self.assertEqual(1, self.subp.call_count) + bounce_args = self.subp.call_args[1]['args'] + self.assertEqual(cmd, bounce_args) + + @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') + def test_set_hostname_option_can_disable_bounce( + self, perform_hostname_bounce): + cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} + data = self.get_ovf_env_with_dscfg('some-hostname', cfg) + self._get_ds(data).get_data() + + self.assertEqual(0, perform_hostname_bounce.call_count) + + def test_set_hostname_option_can_disable_hostname_set(self): + cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}} + data = self.get_ovf_env_with_dscfg('some-hostname', cfg) + self._get_ds(data).get_data() + + self.assertEqual(0, self.set_hostname.call_count) + + class TestReadAzureOvf(TestCase): def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "" + construct_valid_ovf_env(data={}) -- cgit v1.2.3 From b8706d7dc930c5c9dce1f96a000c66e5dda14e02 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 15 Apr 2015 12:13:17 +0100 Subject: Reset host name after bounce has allowed walinuxagent to run successfully. --- cloudinit/sources/DataSourceAzure.py | 134 +++++++++++++------------- tests/unittests/test_datasource/test_azure.py | 31 ++++++ 2 files changed, 99 insertions(+), 66 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d4211fc4..a19d9ca2 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -17,6 +17,7 @@ # along with this program. If not, see . import base64 +import contextlib import crypt import fnmatch import os @@ -74,6 +75,28 @@ def set_hostname(hostname, hostname_command='hostname'): util.subp([hostname_command, hostname]) +@contextlib.contextmanager +def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): + """ + Set a temporary hostname, restoring the previous hostname on exit. + + Will have the value of the previous hostname when used as a context + manager, or None if the hostname was not changed. 
+ """ + policy = cfg['hostname_bounce']['policy'] + previous_hostname = get_hostname(hostname_command) + if (not util.is_true(cfg.get('set_hostname')) + or util.is_false(policy) + or (previous_hostname == temp_hostname and policy != 'force')): + yield None + return + set_hostname(temp_hostname, hostname_command) + try: + yield previous_hostname + finally: + set_hostname(previous_hostname, hostname_command) + + class DataSourceAzureNet(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -162,33 +185,40 @@ class DataSourceAzureNet(sources.DataSource): # the directory to be protected. write_files(ddir, files, dirmode=0o700) - # handle the hostname 'publishing' - try: - handle_set_hostname(mycfg.get('set_hostname'), - self.metadata.get('local-hostname'), - mycfg['hostname_bounce']) - except Exception as e: - LOG.warn("Failed publishing hostname: %s", e) - util.logexc(LOG, "handling set_hostname failed") - - try: - invoke_agent(mycfg['agent_command']) - except util.ProcessExecutionError: - # claim the datasource even if the command failed - util.logexc(LOG, "agent command '%s' failed.", - mycfg['agent_command']) - - shcfgxml = os.path.join(ddir, "SharedConfig.xml") - wait_for = [shcfgxml] - - fp_files = [] - for pk in self.cfg.get('_pubkeys', []): - bname = str(pk['fingerprint'] + ".crt") - fp_files += [os.path.join(ddir, bname)] + temp_hostname = self.metadata.get('local-hostname') + hostname_command = mycfg['hostname_bounce']['hostname_command'] + with temporary_hostname(temp_hostname, mycfg, + hostname_command=hostname_command) \ + as previous_hostname: + if (previous_hostname is not None + and util.is_true(mycfg.get('set_hostname'))): + cfg = mycfg['hostname_bounce'] + try: + perform_hostname_bounce(hostname=temp_hostname, + cfg=cfg, + prev_hostname=previous_hostname) + except Exception as e: + LOG.warn("Failed publishing hostname: %s", e) + util.logexc(LOG, "handling set_hostname failed") - missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", - func=wait_for_files, - args=(wait_for + fp_files,)) + try: + invoke_agent(mycfg['agent_command']) + except util.ProcessExecutionError: + # claim the datasource even if the command failed + util.logexc(LOG, "agent command '%s' failed.", + mycfg['agent_command']) + + shcfgxml = os.path.join(ddir, "SharedConfig.xml") + wait_for = [shcfgxml] + + fp_files = [] + for pk in self.cfg.get('_pubkeys', []): + bname = str(pk['fingerprint'] + ".crt") + fp_files += [os.path.join(ddir, bname)] + + missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", + func=wait_for_files, + args=(wait_for + fp_files,)) if len(missing): LOG.warn("Did not find files, but going on: %s", missing) @@ -307,48 +337,15 @@ def support_new_ephemeral(cfg): return mod_list -def handle_set_hostname(enabled, hostname, cfg): - if not util.is_true(enabled): - return - - if not hostname: - LOG.warn("set_hostname was true but no local-hostname") - return - - apply_hostname_bounce(hostname=hostname, policy=cfg['policy'], - interface=cfg['interface'], - command=cfg['command'], - hostname_command=cfg['hostname_command']) - - -def perform_hostname_bounce(command, env): - shell = not isinstance(command, (list, tuple)) - # capture=False, see comments in bug 1202758 and bug 1206164. 
- util.log_time(logfunc=LOG.debug, msg="publishing hostname", - get_uptime=True, func=util.subp, - kwargs={'args': command, 'shell': shell, 'capture': False, - 'env': env}) - - -def apply_hostname_bounce(hostname, policy, interface, command, - hostname_command="hostname"): +def perform_hostname_bounce(hostname, cfg, prev_hostname): # set the hostname to 'hostname' if it is not already set to that. # then, if policy is not off, bounce the interface using command - prev_hostname = get_hostname() - - set_hostname(hostname, hostname_command) - - msg = ("phostname=%s hostname=%s policy=%s interface=%s" % - (prev_hostname, hostname, policy, interface)) - - if util.is_false(policy): - LOG.debug("pubhname: policy false, skipping [%s]", msg) - return - - if prev_hostname == hostname and policy != "force": - LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg) - return + command = cfg['command'] + interface = cfg['interface'] + policy = cfg['policy'] + msg = ("hostname=%s policy=%s interface=%s" % + (hostname, policy, interface)) env = os.environ.copy() env['interface'] = interface env['hostname'] = hostname @@ -358,7 +355,12 @@ def apply_hostname_bounce(hostname, policy, interface, command, command = BOUNCE_COMMAND LOG.debug("pubhname: publishing hostname [%s]", msg) - perform_hostname_bounce(command, env) + shell = not isinstance(command, (list, tuple)) + # capture=False, see comments in bug 1202758 and bug 1206164. + util.log_time(logfunc=LOG.debug, msg="publishing hostname", + get_uptime=True, func=util.subp, + kwargs={'args': command, 'shell': shell, 'capture': False, + 'env': env}) def crtfile_to_pubkey(fname): diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 3adf9bdf..7e789853 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -439,6 +439,11 @@ class TestAzureBounce(TestCase): } return construct_valid_ovf_env(data=odata) + def test_disabled_bounce_does_not_change_hostname(self): + cfg = {'hostname_bounce': {'policy': 'off'}} + self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data() + self.assertEqual(0, self.set_hostname.call_count) + @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') def test_disabled_bounce_does_not_perform_bounce( self, perform_hostname_bounce): @@ -446,6 +451,13 @@ class TestAzureBounce(TestCase): self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data() self.assertEqual(0, perform_hostname_bounce.call_count) + def test_same_hostname_does_not_change_hostname(self): + host_name = 'unchanged-host-name' + self.get_hostname.return_value = host_name + cfg = {'hostname_bounce': {'policy': 'yes'}} + self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data() + self.assertEqual(0, self.set_hostname.call_count) + @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') def test_unchanged_hostname_does_not_perform_bounce( self, perform_hostname_bounce): @@ -480,6 +492,25 @@ class TestAzureBounce(TestCase): self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data() self.assertEqual(1, perform_hostname_bounce.call_count) + def test_different_hostnames_sets_hostname_back(self): + initial_host_name = 'default-host-name' + self.get_hostname.return_value = initial_host_name + self._get_ds( + self.get_ovf_env_with_dscfg('some-host-name', {})).get_data() + self.assertEqual(initial_host_name, + self.set_hostname.call_args_list[-1][0][0]) + + 
@mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce') + def test_failure_in_bounce_still_resets_host_name( + self, perform_hostname_bounce): + perform_hostname_bounce.side_effect = Exception + initial_host_name = 'default-host-name' + self.get_hostname.return_value = initial_host_name + self._get_ds( + self.get_ovf_env_with_dscfg('some-host-name', {})).get_data() + self.assertEqual(initial_host_name, + self.set_hostname.call_args_list[-1][0][0]) + def test_environment_correct_for_bounce_command(self): interface = 'int0' hostname = 'my-new-host' -- cgit v1.2.3 From 844ebbee112143e85fb46b4b5ed649729f903d2c Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 20 Apr 2015 15:23:57 +0100 Subject: Refactor GCE metadata fetching to use a helper class. --- cloudinit/sources/DataSourceGCE.py | 69 ++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 33 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 608c07f1..255f5f45 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -30,6 +30,31 @@ BUILTIN_DS_CONFIG = { REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname') +class GoogleMetadataFetcher(object): + headers = {'X-Google-Metadata-Request': True} + + def __init__(self, metadata_address): + self.metadata_address = metadata_address + + def get_value(self, path, is_text): + value = None + try: + resp = url_helper.readurl(url=self.metadata_address + path, + headers=self.headers) + except url_helper.UrlError as exc: + msg = "url %s raised exception %s" + LOG.debug(msg, path, exc) + else: + if resp.code == 200: + if is_text: + value = util.decode_binary(resp.contents) + else: + value = resp.contents + else: + LOG.debug("url %s returned code %s", path, resp.code) + return value + + class DataSourceGCE(sources.DataSource): def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -50,9 +75,6 @@ class DataSourceGCE(sources.DataSource): return public_key def get_data(self): - # GCE metadata server requires a custom header since v1 - headers = {'X-Google-Metadata-Request': True} - # url_map: (our-key, path, required, is_text) url_map = [ ('instance-id', 'instance/id', True, True), @@ -69,40 +91,21 @@ class DataSourceGCE(sources.DataSource): LOG.debug("%s is not resolvable", self.metadata_address) return False + metadata_fetcher = GoogleMetadataFetcher(self.metadata_address) # iterate over url_map keys to get metadata items found = False for (mkey, path, required, is_text) in url_map: - try: - resp = url_helper.readurl(url=self.metadata_address + path, - headers=headers) - if resp.code == 200: - found = True - if is_text: - self.metadata[mkey] = util.decode_binary(resp.contents) - else: - self.metadata[mkey] = resp.contents + value = metadata_fetcher.get_value(path, is_text) + if value: + found = True + if required and value is None: + msg = "required url %s returned nothing. not GCE" + if not found: + LOG.debug(msg, path) else: - if required: - msg = "required url %s returned code %s. not GCE" - if not found: - LOG.debug(msg, path, resp.code) - else: - LOG.warn(msg, path, resp.code) - return False - else: - self.metadata[mkey] = None - except url_helper.UrlError as e: - if required: - msg = "required url %s raised exception %s. not GCE" - if not found: - LOG.debug(msg, path, e) - else: - LOG.warn(msg, path, e) - return False - msg = "Failed to get %s metadata item: %s." 
- LOG.debug(msg, path, e) - - self.metadata[mkey] = None + LOG.warn(msg, path) + return False + self.metadata[mkey] = value if self.metadata['public-keys']: lines = self.metadata['public-keys'].splitlines() -- cgit v1.2.3 From 47eb1c4b52a2f5f4f8ea657918acd94209668bd7 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 20 Apr 2015 15:24:00 +0100 Subject: Rename found variable in GCE data source. --- cloudinit/sources/DataSourceGCE.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 255f5f45..9cf2f56e 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -93,14 +93,14 @@ class DataSourceGCE(sources.DataSource): metadata_fetcher = GoogleMetadataFetcher(self.metadata_address) # iterate over url_map keys to get metadata items - found = False + running_on_gce = False for (mkey, path, required, is_text) in url_map: value = metadata_fetcher.get_value(path, is_text) if value: - found = True + running_on_gce = True if required and value is None: msg = "required url %s returned nothing. not GCE" - if not found: + if not running_on_gce: LOG.debug(msg, path) else: LOG.warn(msg, path) @@ -119,7 +119,7 @@ class DataSourceGCE(sources.DataSource): else: LOG.warn('unknown user-data-encoding: %s, ignoring', encoding) - return found + return running_on_gce @property def launch_index(self): -- cgit v1.2.3 From 6e84c05d2dc402de8cc4ae414af8657b97317218 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 20 Apr 2015 15:24:21 +0100 Subject: Support multiple metadata paths for metadata keys in GCE data source. --- cloudinit/sources/DataSourceGCE.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 9cf2f56e..1a133c28 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -77,12 +77,12 @@ class DataSourceGCE(sources.DataSource): def get_data(self): # url_map: (our-key, path, required, is_text) url_map = [ - ('instance-id', 'instance/id', True, True), - ('availability-zone', 'instance/zone', True, True), - ('local-hostname', 'instance/hostname', True, True), - ('public-keys', 'project/attributes/sshKeys', False, True), - ('user-data', 'instance/attributes/user-data', False, False), - ('user-data-encoding', 'instance/attributes/user-data-encoding', + ('instance-id', ('instance/id',), True, True), + ('availability-zone', ('instance/zone',), True, True), + ('local-hostname', ('instance/hostname',), True, True), + ('public-keys', ('project/attributes/sshKeys',), False, True), + ('user-data', ('instance/attributes/user-data',), False, False), + ('user-data-encoding', ('instance/attributes/user-data-encoding',), False, True), ] @@ -94,16 +94,20 @@ class DataSourceGCE(sources.DataSource): metadata_fetcher = GoogleMetadataFetcher(self.metadata_address) # iterate over url_map keys to get metadata items running_on_gce = False - for (mkey, path, required, is_text) in url_map: - value = metadata_fetcher.get_value(path, is_text) + for (mkey, paths, required, is_text) in url_map: + value = None + for path in paths: + new_value = metadata_fetcher.get_value(path, is_text) + if new_value is not None: + value = new_value if value: running_on_gce = True if required and value is None: - msg = "required url %s returned nothing. 
not GCE" + msg = "required key %s returned nothing. not GCE" if not running_on_gce: - LOG.debug(msg, path) + LOG.debug(msg, mkey) else: - LOG.warn(msg, path) + LOG.warn(msg, mkey) return False self.metadata[mkey] = value -- cgit v1.2.3 From 4fc65f02ae3fbf1a2062e6169ee39b5c5d5e23bc Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 20 Apr 2015 15:24:22 +0100 Subject: GCE instance-level SSH keys override project-level keys. (LP: #1403617) --- cloudinit/sources/DataSourceGCE.py | 3 ++- tests/unittests/test_datasource/test_gce.py | 38 ++++++++++++++++++++++++++--- 2 files changed, 36 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 1a133c28..f4ed915d 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -80,7 +80,8 @@ class DataSourceGCE(sources.DataSource): ('instance-id', ('instance/id',), True, True), ('availability-zone', ('instance/zone',), True, True), ('local-hostname', ('instance/hostname',), True, True), - ('public-keys', ('project/attributes/sshKeys',), False, True), + ('public-keys', ('project/attributes/sshKeys', + 'instance/attributes/sshKeys'), False, True), ('user-data', ('instance/attributes/user-data',), False, False), ('user-data-encoding', ('instance/attributes/user-data-encoding',), False, True), diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 540a55d0..1fb100f7 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -113,10 +113,6 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.assertEqual(GCE_META.get('instance/attributes/user-data'), self.ds.get_userdata_raw()) - # we expect a list of public ssh keys with user names stripped - self.assertEqual(['ssh-rsa AA2..+aRD0fyVw== root@server'], - self.ds.get_public_ssh_keys()) - # test partial metadata (missing user-data in particular) @httpretty.activate def test_metadata_partial(self): @@ -152,3 +148,37 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): body=_new_request_callback(meta)) self.assertEqual(False, self.ds.get_data()) httpretty.reset() + + @httpretty.activate + def test_project_level_ssh_keys_are_used(self): + httpretty.register_uri(httpretty.GET, MD_URL_RE, + body=_new_request_callback()) + self.ds.get_data() + + # we expect a list of public ssh keys with user names stripped + self.assertEqual(['ssh-rsa AA2..+aRD0fyVw== root@server'], + self.ds.get_public_ssh_keys()) + + @httpretty.activate + def test_instance_level_ssh_keys_are_used(self): + key_content = 'ssh-rsa JustAUser root@server' + meta = GCE_META.copy() + meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content) + + httpretty.register_uri(httpretty.GET, MD_URL_RE, + body=_new_request_callback(meta)) + self.ds.get_data() + + self.assertIn(key_content, self.ds.get_public_ssh_keys()) + + @httpretty.activate + def test_instance_level_keys_replace_project_level_keys(self): + key_content = 'ssh-rsa JustAUser root@server' + meta = GCE_META.copy() + meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content) + + httpretty.register_uri(httpretty.GET, MD_URL_RE, + body=_new_request_callback(meta)) + self.ds.get_data() + + self.assertEqual([key_content], self.ds.get_public_ssh_keys()) -- cgit v1.2.3 From 96854d720d4bd356181acfa093744599a807ea8e Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 1 May 2015 05:38:56 -0400 Subject: fix 'Make pyflakes' --- Makefile 
| 2 +- cloudinit/config/cc_apt_pipelining.py | 2 +- cloudinit/config/cc_snappy.py | 2 -- cloudinit/sources/DataSourceOpenNebula.py | 1 - tests/unittests/test_datasource/test_smartos.py | 2 -- tests/unittests/test_handler/test_handler_apt_configure.py | 1 - tests/unittests/test_handler/test_handler_snappy.py | 5 ----- tests/unittests/test_templating.py | 5 +---- tools/hacking.py | 2 +- tools/validate-yaml.py | 3 +-- 10 files changed, 5 insertions(+), 20 deletions(-) (limited to 'cloudinit/sources') diff --git a/Makefile b/Makefile index 009257ca..bb0c5253 100644 --- a/Makefile +++ b/Makefile @@ -20,7 +20,7 @@ pep8: @$(CWD)/tools/run-pep8 $(PY_FILES) pyflakes: - pyflakes $(PY_FILES) + @$(CWD)/tools/tox-venv py34 pyflakes $(PY_FILES) pip-requirements: @echo "Installing cloud-init dependencies..." diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py index e5629175..40c32c84 100644 --- a/cloudinit/config/cc_apt_pipelining.py +++ b/cloudinit/config/cc_apt_pipelining.py @@ -43,7 +43,7 @@ def handle(_name, cfg, _cloud, log, _args): write_apt_snippet("0", log, DEFAULT_FILE) elif apt_pipe_value_s in ("none", "unchanged", "os"): return - elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]: + elif apt_pipe_value_s in [str(b) for b in range(0, 6)]: write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE) else: log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value) diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py index bfe76558..7aaec94a 100644 --- a/cloudinit/config/cc_snappy.py +++ b/cloudinit/config/cc_snappy.py @@ -42,12 +42,10 @@ Example config: """ from cloudinit import log as logging -from cloudinit import templater from cloudinit import util from cloudinit.settings import PER_INSTANCE import glob -import six import tempfile import os diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 61709c1b..ac2c3b45 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -24,7 +24,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-import base64 import os import pwd import re diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 28b41eaf..adee9019 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -36,8 +36,6 @@ from binascii import crc32 import serial import six -import six - from cloudinit import helpers as c_helpers from cloudinit.sources import DataSourceSmartOS from cloudinit.util import b64e diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py index 02cad8b2..895728b3 100644 --- a/tests/unittests/test_handler/test_handler_apt_configure.py +++ b/tests/unittests/test_handler/test_handler_apt_configure.py @@ -7,7 +7,6 @@ import os import re import shutil import tempfile -import unittest class TestAptProxyConfig(TestCase): diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py index f3109bac..eceb14d9 100644 --- a/tests/unittests/test_handler/test_handler_snappy.py +++ b/tests/unittests/test_handler/test_handler_snappy.py @@ -38,7 +38,6 @@ class TestInstallPackages(t_help.TestCase): if 'args' not in kwargs: kwargs['args'] = args[0] self.subp_called.append(kwargs) - snap_cmds = [] args = kwargs['args'] # here we basically parse the snappy command invoked # and append to snapcmds a list of (mode, pkg, config) @@ -117,9 +116,6 @@ class TestInstallPackages(t_help.TestCase): def test_package_ops_common_filename(self): # fish package name from filename # package names likely look like: pkgname.namespace_version_arch.snap - fname = "xkcd-webserver.canonical_0.3.4_all.snap" - name = "xkcd-webserver.canonical" - shortname = "xkcd-webserver" # find filenames self.populate_tmp( @@ -165,7 +161,6 @@ class TestInstallPackages(t_help.TestCase): 'ubuntu-core': {'c1': 'c2'}, 'notinstalled.smoser': {'s1': 's2'}, } - cfg = {'config-example-k1': 'config-example-k2'} ret = get_package_ops( packages=['config-example.canonical'], configs=cfgs, installed=['config-example.smoser', 'pkg1.canonical', diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index cf7c03b0..0c19a2c2 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -18,10 +18,6 @@ from __future__ import print_function -import sys -import six -import unittest - from . 
import helpers as test_helpers import textwrap @@ -30,6 +26,7 @@ from cloudinit import templater try: import Cheetah HAS_CHEETAH = True + Cheetah # make pyflakes happy, as Cheetah is not used here except ImportError: HAS_CHEETAH = False diff --git a/tools/hacking.py b/tools/hacking.py index e7797564..3175df38 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -128,7 +128,7 @@ def cloud_docstring_multiline_end(physical_line): """ pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start if (pos != -1 and len(physical_line) == pos): - print physical_line + print(physical_line) if (physical_line[pos + 3] == ' '): return (pos, "N403: multi line docstring end on new line") diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py index eda59cb8..6e164590 100755 --- a/tools/validate-yaml.py +++ b/tools/validate-yaml.py @@ -4,7 +4,6 @@ """ import sys - import yaml @@ -17,7 +16,7 @@ if __name__ == "__main__": yaml.safe_load(fh.read()) fh.close() sys.stdout.write(" - ok\n") - except Exception, e: + except Exception as e: sys.stdout.write(" - bad (%s)\n" % (e)) bads += 1 if bads > 0: -- cgit v1.2.3 From 6ddf7beb112f016be7ebd6fe296de6eaaf3aa9ca Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Thu, 7 May 2015 14:46:47 +0100 Subject: Implement basic replacement for walinuxagent in Azure data source. --- cloudinit/sources/DataSourceAzure.py | 292 +++++++++++++++++++---- tests/unittests/test_datasource/test_azure.py | 331 ++++++++++++++++++++++++++ 2 files changed, 574 insertions(+), 49 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index a19d9ca2..bd3c742b 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -22,8 +22,14 @@ import crypt import fnmatch import os import os.path +import re +import socket +import struct +import tempfile import time +from contextlib import contextmanager from xml.dom import minidom +from xml.etree import ElementTree from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS @@ -34,13 +40,11 @@ LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} -AGENT_START = ['service', 'walinuxagent', 'start'] BOUNCE_COMMAND = ['sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"] DATA_DIR_CLEAN_LIST = ['SharedConfig.xml'] BUILTIN_DS_CONFIG = { - 'agent_command': AGENT_START, 'data_dir': "/var/lib/waagent", 'set_hostname': True, 'hostname_bounce': { @@ -66,6 +70,231 @@ BUILTIN_CLOUD_CONFIG = { DS_CFG_PATH = ['datasource', DS_NAME] DEF_EPHEMERAL_LABEL = 'Temporary Storage' +REPORT_READY_XML_TEMPLATE = """\ + + + {incarnation} + + {container_id} + + + {instance_id} + + Ready + + + + +""" + + +@contextmanager +def cd(newdir): + prevdir = os.getcwd() + os.chdir(os.path.expanduser(newdir)) + try: + yield + finally: + os.chdir(prevdir) + + +class AzureEndpointHttpClient(object): + + headers = { + 'x-ms-agent-name': 'WALinuxAgent', + 'x-ms-version': '2012-11-30', + } + + def __init__(self, certificate): + self.extra_secure_headers = { + "x-ms-cipher-name": "DES_EDE3_CBC", + "x-ms-guest-agent-public-x509-cert": certificate, + } + + def get(self, url, secure=False): + headers = self.headers + if secure: + headers = self.headers.copy() + headers.update(self.extra_secure_headers) + return util.read_file_or_url(url, headers=headers) + + def post(self, url, data=None, extra_headers=None): + headers = self.headers + if extra_headers is not None: + headers = 
self.headers.copy() + headers.update(extra_headers) + return util.read_file_or_url(url, data=data, headers=headers) + + +def find_endpoint(): + content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases') + value = None + for line in content.splitlines(): + if 'unknown-245' in line: + value = line.strip(' ').split(' ', 2)[-1].strip(';\n"') + if value is None: + raise Exception('No endpoint found in DHCP config.') + if ':' in value: + hex_string = '' + for hex_pair in value.split(':'): + if len(hex_pair) == 1: + hex_pair = '0' + hex_pair + hex_string += hex_pair + value = struct.pack('>L', int(hex_string.replace(':', ''), 16)) + else: + value = value.encode('utf-8') + return socket.inet_ntoa(value) + + +class GoalState(object): + + def __init__(self, xml, http_client): + self.http_client = http_client + self.root = ElementTree.fromstring(xml) + + def _text_from_xpath(self, xpath): + element = self.root.find(xpath) + if element is not None: + return element.text + return None + + @property + def container_id(self): + return self._text_from_xpath('./Container/ContainerId') + + @property + def incarnation(self): + return self._text_from_xpath('./Incarnation') + + @property + def instance_id(self): + return self._text_from_xpath( + './Container/RoleInstanceList/RoleInstance/InstanceId') + + @property + def shared_config_xml(self): + url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance' + '/Configuration/SharedConfig') + return self.http_client.get(url).contents + + @property + def certificates_xml(self): + url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance' + '/Configuration/Certificates') + if url is not None: + return self.http_client.get(url, secure=True).contents + return None + + +class OpenSSLManager(object): + + certificate_names = { + 'private_key': 'TransportPrivate.pem', + 'certificate': 'TransportCert.pem', + } + + def __init__(self): + self.tmpdir = tempfile.TemporaryDirectory() + self.certificate = None + self.generate_certificate() + + def generate_certificate(self): + if self.certificate is not None: + return + with cd(self.tmpdir.name): + util.subp([ + 'openssl', 'req', '-x509', '-nodes', '-subj', + '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048', + '-keyout', self.certificate_names['private_key'], + '-out', self.certificate_names['certificate'], + ]) + certificate = '' + for line in open(self.certificate_names['certificate']): + if "CERTIFICATE" not in line: + certificate += line.rstrip() + self.certificate = certificate + + def parse_certificates(self, certificates_xml): + tag = ElementTree.fromstring(certificates_xml).find( + './/Data') + certificates_content = tag.text + lines = [ + b'MIME-Version: 1.0', + b'Content-Disposition: attachment; filename="Certificates.p7m"', + b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"', + b'Content-Transfer-Encoding: base64', + b'', + certificates_content.encode('utf-8'), + ] + with cd(self.tmpdir.name): + with open('Certificates.p7m', 'wb') as f: + f.write(b'\n'.join(lines)) + out, _ = util.subp( + 'openssl cms -decrypt -in Certificates.p7m -inkey' + ' {private_key} -recip {certificate} | openssl pkcs12 -nodes' + ' -password pass:'.format(**self.certificate_names), + shell=True) + private_keys, certificates = [], [] + current = [] + for line in out.splitlines(): + current.append(line) + if re.match(r'[-]+END .*?KEY[-]+$', line): + private_keys.append('\n'.join(current)) + current = [] + elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line): + 
certificates.append('\n'.join(current)) + current = [] + keys = [] + for certificate in certificates: + with cd(self.tmpdir.name): + public_key, _ = util.subp( + 'openssl x509 -noout -pubkey |' + 'ssh-keygen -i -m PKCS8 -f /dev/stdin', + data=certificate, + shell=True) + keys.append(public_key) + return keys + + +class WALinuxAgentShim(object): + + def __init__(self): + self.endpoint = find_endpoint() + self.goal_state = None + self.openssl_manager = OpenSSLManager() + self.http_client = AzureEndpointHttpClient( + self.openssl_manager.certificate) + self.values = {} + + def register_with_azure_and_fetch_data(self): + LOG.info('Registering with Azure...') + for i in range(10): + try: + response = self.http_client.get( + 'http://{}/machine/?comp=goalstate'.format(self.endpoint)) + except Exception: + time.sleep(i + 1) + else: + break + self.goal_state = GoalState(response.contents, self.http_client) + self.public_keys = [] + if self.goal_state.certificates_xml is not None: + self.public_keys = self.openssl_manager.parse_certificates( + self.goal_state.certificates_xml) + self._report_ready() + + def _report_ready(self): + document = REPORT_READY_XML_TEMPLATE.format( + incarnation=self.goal_state.incarnation, + container_id=self.goal_state.container_id, + instance_id=self.goal_state.instance_id, + ) + self.http_client.post( + "http://{}/machine?comp=health".format(self.endpoint), + data=document, + extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, + ) + def get_hostname(hostname_command='hostname'): return util.subp(hostname_command, capture=True)[0].strip() @@ -185,53 +414,17 @@ class DataSourceAzureNet(sources.DataSource): # the directory to be protected. write_files(ddir, files, dirmode=0o700) - temp_hostname = self.metadata.get('local-hostname') - hostname_command = mycfg['hostname_bounce']['hostname_command'] - with temporary_hostname(temp_hostname, mycfg, - hostname_command=hostname_command) \ - as previous_hostname: - if (previous_hostname is not None - and util.is_true(mycfg.get('set_hostname'))): - cfg = mycfg['hostname_bounce'] - try: - perform_hostname_bounce(hostname=temp_hostname, - cfg=cfg, - prev_hostname=previous_hostname) - except Exception as e: - LOG.warn("Failed publishing hostname: %s", e) - util.logexc(LOG, "handling set_hostname failed") + shim = WALinuxAgentShim() + shim.register_with_azure_and_fetch_data() - try: - invoke_agent(mycfg['agent_command']) - except util.ProcessExecutionError: - # claim the datasource even if the command failed - util.logexc(LOG, "agent command '%s' failed.", - mycfg['agent_command']) - - shcfgxml = os.path.join(ddir, "SharedConfig.xml") - wait_for = [shcfgxml] - - fp_files = [] - for pk in self.cfg.get('_pubkeys', []): - bname = str(pk['fingerprint'] + ".crt") - fp_files += [os.path.join(ddir, bname)] - - missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", - func=wait_for_files, - args=(wait_for + fp_files,)) - if len(missing): - LOG.warn("Did not find files, but going on: %s", missing) - - if shcfgxml in missing: - LOG.warn("SharedConfig.xml missing, using static instance-id") - else: - try: - self.metadata['instance-id'] = iid_from_shared_config(shcfgxml) - except ValueError as e: - LOG.warn("failed to get instance id in %s: %s", shcfgxml, e) + try: + self.metadata['instance-id'] = iid_from_shared_config_content( + shim.goal_state.shared_config_xml) + except ValueError as e: + LOG.warn( + "failed to get instance id in %s: %s", shim.shared_config, e) - pubkeys = pubkeys_from_crt_files(fp_files) - 
self.metadata['public-keys'] = pubkeys
+        self.metadata['public-keys'] = shim.public_keys
 
         found_ephemeral = find_ephemeral_disk()
         if found_ephemeral:
@@ -363,10 +556,11 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
                 'env': env})
 
 
-def crtfile_to_pubkey(fname):
+def crtfile_to_pubkey(fname, data=None):
     pipeline = ('openssl x509 -noout -pubkey < "$0" |'
                 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
-    (out, _err) = util.subp(['sh', '-c', pipeline, fname], capture=True)
+    (out, _err) = util.subp(['sh', '-c', pipeline, fname],
+                            capture=True, data=data)
     return out.rstrip()
 
 
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 7e789853..dc7f2663 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -15,11 +15,48 @@ except ImportError:
 import crypt
 import os
 import stat
+import struct
 import yaml
 import shutil
 import tempfile
 import unittest
 
+from cloudinit import url_helper
+
+
+GOAL_STATE_TEMPLATE = """\
+<?xml version="1.0" encoding="utf-8"?>
+<GoalState xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+           xmlns:xsd="http://www.w3.org/2001/XMLSchema"
+           xsi:noNamespaceSchemaLocation="goalstate10.xsd">
+  <Version>2012-11-30</Version>
+  <Incarnation>{incarnation}</Incarnation>
+  <Machine>
+    <ExpectedState>Started</ExpectedState>
+    <StopRolesDeadlineHint>300000</StopRolesDeadlineHint>
+    <LBProbePorts>
+      <Port>16001</Port>
+    </LBProbePorts>
+    <ExpectHealthReport>FALSE</ExpectHealthReport>
+  </Machine>
+  <Container>
+    <ContainerId>{container_id}</ContainerId>
+    <RoleInstanceList>
+      <RoleInstance>
+        <InstanceId>{instance_id}</InstanceId>
+        <State>Started</State>
+        <Configuration>
+          <HostingEnvironmentConfig>
+            http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=hostingEnvironmentConfig&amp;incarnation=1
+          </HostingEnvironmentConfig>
+          <SharedConfig>{shared_config_url}</SharedConfig>
+          <ExtensionsConfig>
+            http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=extensionsConfig&amp;incarnation=1
+          </ExtensionsConfig>
+          <FullConfig>
+            http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=fullConfig&amp;incarnation=1
+          </FullConfig>
+          <Certificates>{certificates_url}</Certificates>
+          <ConfigName>68ce47b32ea94952be7b20951c383628.0.68ce47b32ea94952be7b20951c383628.0.utl-trusty--292258.1.xml</ConfigName>
+        </Configuration>
+      </RoleInstance>
+    </RoleInstanceList>
+  </Container>
+</GoalState>
+"""
+
 
 def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
     if data is None:
@@ -579,3 +616,297 @@ class TestReadAzureSharedConfig(unittest.TestCase):
         """
         ret = DataSourceAzure.iid_from_shared_config_content(xml)
         self.assertEqual("MY_INSTANCE_ID", ret)
+
+
+class TestFindEndpoint(TestCase):
+
+    def setUp(self):
+        super(TestFindEndpoint, self).setUp()
+        patches = ExitStack()
+        self.addCleanup(patches.close)
+
+        self.load_file = patches.enter_context(
+            mock.patch.object(DataSourceAzure.util, 'load_file'))
+
+    def test_missing_file(self):
+        self.load_file.side_effect = IOError
+        self.assertRaises(IOError, DataSourceAzure.find_endpoint)
+
+    def test_missing_special_azure_line(self):
+        self.load_file.return_value = ''
+        self.assertRaises(Exception, DataSourceAzure.find_endpoint)
+
+    def _build_lease_content(self, ip_address, use_hex=True):
+        ip_address_repr = ':'.join(
+            [hex(int(part)).replace('0x', '')
+             for part in ip_address.split('.')])
+        if not use_hex:
+            ip_address_repr = struct.pack(
+                '>L', int(ip_address_repr.replace(':', ''), 16))
+            ip_address_repr = '"{0}"'.format(ip_address_repr.decode('utf-8'))
+        return '\n'.join([
+            'lease {',
+            ' interface "eth0";',
+            ' option unknown-245 {0};'.format(ip_address_repr),
+            '}'])
+
+    def test_hex_string(self):
+        ip_address = '98.76.54.32'
+        file_content = self._build_lease_content(ip_address)
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_address, DataSourceAzure.find_endpoint())
+
+    def test_hex_string_with_single_character_part(self):
+        ip_address = '4.3.2.1'
+        file_content = self._build_lease_content(ip_address)
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_address,
DataSourceAzure.find_endpoint()) + + def test_packed_string(self): + ip_address = '98.76.54.32' + file_content = self._build_lease_content(ip_address, use_hex=False) + self.load_file.return_value = file_content + self.assertEqual(ip_address, DataSourceAzure.find_endpoint()) + + def test_latest_lease_used(self): + ip_addresses = ['4.3.2.1', '98.76.54.32'] + file_content = '\n'.join([self._build_lease_content(ip_address) + for ip_address in ip_addresses]) + self.load_file.return_value = file_content + self.assertEqual(ip_addresses[-1], DataSourceAzure.find_endpoint()) + + +class TestGoalStateParsing(TestCase): + + default_parameters = { + 'incarnation': 1, + 'container_id': 'MyContainerId', + 'instance_id': 'MyInstanceId', + 'shared_config_url': 'MySharedConfigUrl', + 'certificates_url': 'MyCertificatesUrl', + } + + def _get_goal_state(self, http_client=None, **kwargs): + if http_client is None: + http_client = mock.MagicMock() + parameters = self.default_parameters.copy() + parameters.update(kwargs) + xml = GOAL_STATE_TEMPLATE.format(**parameters) + if parameters['certificates_url'] is None: + new_xml_lines = [] + for line in xml.splitlines(): + if 'Certificates' in line: + continue + new_xml_lines.append(line) + xml = '\n'.join(new_xml_lines) + return DataSourceAzure.GoalState(xml, http_client) + + def test_incarnation_parsed_correctly(self): + incarnation = '123' + goal_state = self._get_goal_state(incarnation=incarnation) + self.assertEqual(incarnation, goal_state.incarnation) + + def test_container_id_parsed_correctly(self): + container_id = 'TestContainerId' + goal_state = self._get_goal_state(container_id=container_id) + self.assertEqual(container_id, goal_state.container_id) + + def test_instance_id_parsed_correctly(self): + instance_id = 'TestInstanceId' + goal_state = self._get_goal_state(instance_id=instance_id) + self.assertEqual(instance_id, goal_state.instance_id) + + def test_shared_config_xml_parsed_and_fetched_correctly(self): + http_client = mock.MagicMock() + shared_config_url = 'TestSharedConfigUrl' + goal_state = self._get_goal_state( + http_client=http_client, shared_config_url=shared_config_url) + shared_config_xml = goal_state.shared_config_xml + self.assertEqual(1, http_client.get.call_count) + self.assertEqual(shared_config_url, http_client.get.call_args[0][0]) + self.assertEqual(http_client.get.return_value.contents, + shared_config_xml) + + def test_certificates_xml_parsed_and_fetched_correctly(self): + http_client = mock.MagicMock() + certificates_url = 'TestSharedConfigUrl' + goal_state = self._get_goal_state( + http_client=http_client, certificates_url=certificates_url) + certificates_xml = goal_state.certificates_xml + self.assertEqual(1, http_client.get.call_count) + self.assertEqual(certificates_url, http_client.get.call_args[0][0]) + self.assertTrue(http_client.get.call_args[1].get('secure', False)) + self.assertEqual(http_client.get.return_value.contents, + certificates_xml) + + def test_missing_certificates_skips_http_get(self): + http_client = mock.MagicMock() + goal_state = self._get_goal_state( + http_client=http_client, certificates_url=None) + certificates_xml = goal_state.certificates_xml + self.assertEqual(0, http_client.get.call_count) + self.assertIsNone(certificates_xml) + + +class TestAzureEndpointHttpClient(TestCase): + + regular_headers = { + 'x-ms-agent-name': 'WALinuxAgent', + 'x-ms-version': '2012-11-30', + } + + def setUp(self): + super(TestAzureEndpointHttpClient, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + 
self.read_file_or_url = patches.enter_context( + mock.patch.object(DataSourceAzure.util, 'read_file_or_url')) + + def test_non_secure_get(self): + client = DataSourceAzure.AzureEndpointHttpClient(mock.MagicMock()) + url = 'MyTestUrl' + response = client.get(url, secure=False) + self.assertEqual(1, self.read_file_or_url.call_count) + self.assertEqual(self.read_file_or_url.return_value, response) + self.assertEqual(mock.call(url, headers=self.regular_headers), + self.read_file_or_url.call_args) + + def test_secure_get(self): + url = 'MyTestUrl' + certificate = mock.MagicMock() + expected_headers = self.regular_headers.copy() + expected_headers.update({ + "x-ms-cipher-name": "DES_EDE3_CBC", + "x-ms-guest-agent-public-x509-cert": certificate, + }) + client = DataSourceAzure.AzureEndpointHttpClient(certificate) + response = client.get(url, secure=True) + self.assertEqual(1, self.read_file_or_url.call_count) + self.assertEqual(self.read_file_or_url.return_value, response) + self.assertEqual(mock.call(url, headers=expected_headers), + self.read_file_or_url.call_args) + + def test_post(self): + data = mock.MagicMock() + url = 'MyTestUrl' + client = DataSourceAzure.AzureEndpointHttpClient(mock.MagicMock()) + response = client.post(url, data=data) + self.assertEqual(1, self.read_file_or_url.call_count) + self.assertEqual(self.read_file_or_url.return_value, response) + self.assertEqual( + mock.call(url, data=data, headers=self.regular_headers), + self.read_file_or_url.call_args) + + def test_post_with_extra_headers(self): + url = 'MyTestUrl' + client = DataSourceAzure.AzureEndpointHttpClient(mock.MagicMock()) + extra_headers = {'test': 'header'} + client.post(url, extra_headers=extra_headers) + self.assertEqual(1, self.read_file_or_url.call_count) + expected_headers = self.regular_headers.copy() + expected_headers.update(extra_headers) + self.assertEqual( + mock.call(mock.ANY, data=mock.ANY, headers=expected_headers), + self.read_file_or_url.call_args) + + +class TestOpenSSLManager(TestCase): + + def setUp(self): + super(TestOpenSSLManager, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.subp = patches.enter_context( + mock.patch.object(DataSourceAzure.util, 'subp')) + + @mock.patch.object(DataSourceAzure, 'cd', mock.MagicMock()) + @mock.patch.object(DataSourceAzure.tempfile, 'TemporaryDirectory') + def test_openssl_manager_creates_a_tmpdir(self, TemporaryDirectory): + manager = DataSourceAzure.OpenSSLManager() + self.assertEqual(TemporaryDirectory.return_value, manager.tmpdir) + + @mock.patch('builtins.open') + def test_generate_certificate_uses_tmpdir(self, open): + subp_directory = {} + + def capture_directory(*args, **kwargs): + subp_directory['path'] = os.getcwd() + + self.subp.side_effect = capture_directory + manager = DataSourceAzure.OpenSSLManager() + self.assertEqual(manager.tmpdir.name, subp_directory['path']) + + +class TestWALinuxAgentShim(TestCase): + + def setUp(self): + super(TestWALinuxAgentShim, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.AzureEndpointHttpClient = patches.enter_context( + mock.patch.object(DataSourceAzure, 'AzureEndpointHttpClient')) + self.find_endpoint = patches.enter_context( + mock.patch.object(DataSourceAzure, 'find_endpoint')) + self.GoalState = patches.enter_context( + mock.patch.object(DataSourceAzure, 'GoalState')) + self.OpenSSLManager = patches.enter_context( + mock.patch.object(DataSourceAzure, 'OpenSSLManager')) + + def test_http_client_uses_certificate(self): + shim = 
DataSourceAzure.WALinuxAgentShim() + self.assertEqual( + [mock.call(self.OpenSSLManager.return_value.certificate)], + self.AzureEndpointHttpClient.call_args_list) + self.assertEqual(self.AzureEndpointHttpClient.return_value, + shim.http_client) + + def test_correct_url_used_for_goalstate(self): + self.find_endpoint.return_value = 'test_endpoint' + shim = DataSourceAzure.WALinuxAgentShim() + shim.register_with_azure_and_fetch_data() + get = self.AzureEndpointHttpClient.return_value.get + self.assertEqual( + [mock.call('http://test_endpoint/machine/?comp=goalstate')], + get.call_args_list) + self.assertEqual( + [mock.call(get.return_value.contents, shim.http_client)], + self.GoalState.call_args_list) + + def test_certificates_used_to_determine_public_keys(self): + shim = DataSourceAzure.WALinuxAgentShim() + shim.register_with_azure_and_fetch_data() + self.assertEqual( + [mock.call(self.GoalState.return_value.certificates_xml)], + self.OpenSSLManager.return_value.parse_certificates.call_args_list) + self.assertEqual( + self.OpenSSLManager.return_value.parse_certificates.return_value, + shim.public_keys) + + def test_absent_certificates_produces_empty_public_keys(self): + self.GoalState.return_value.certificates_xml = None + shim = DataSourceAzure.WALinuxAgentShim() + shim.register_with_azure_and_fetch_data() + self.assertEqual([], shim.public_keys) + + def test_correct_url_used_for_report_ready(self): + self.find_endpoint.return_value = 'test_endpoint' + shim = DataSourceAzure.WALinuxAgentShim() + shim.register_with_azure_and_fetch_data() + expected_url = 'http://test_endpoint/machine?comp=health' + self.assertEqual( + [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], + shim.http_client.post.call_args_list) + + def test_goal_state_values_used_for_report_ready(self): + self.GoalState.return_value.incarnation = 'TestIncarnation' + self.GoalState.return_value.container_id = 'TestContainerId' + self.GoalState.return_value.instance_id = 'TestInstanceId' + shim = DataSourceAzure.WALinuxAgentShim() + shim.register_with_azure_and_fetch_data() + posted_document = shim.http_client.post.call_args[1]['data'] + self.assertIn('TestIncarnation', posted_document) + self.assertIn('TestContainerId', posted_document) + self.assertIn('TestInstanceId', posted_document) -- cgit v1.2.3 From 2edfd791b29df3271bdc3aff40d60336ddd636ed Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 8 May 2015 12:58:18 +0100 Subject: Return a dict of data from WALinuxAgentShim, rather than accessing attributes. 
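The interface change is easiest to see from the caller's side: instead of reaching
into shim attributes after registration, the datasource now consumes a plain dict.
A minimal sketch of the new calling convention follows; WALinuxAgentShim and the
two dict keys are taken from this patch, but the wrapper function itself is
illustrative only, not code from the tree:

    # Hypothetical caller, mirroring what get_data() does in the diff below.
    from cloudinit.sources.DataSourceAzure import WALinuxAgentShim

    def fetch_azure_metadata():
        shim = WALinuxAgentShim()
        # register_with_azure_and_fetch_data() now returns a dict rather
        # than leaving results on shim.goal_state / shim.public_keys.
        data = shim.register_with_azure_and_fetch_data()
        return {'instance-id': data['instance-id'],
                'public-keys': data['public-keys']}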
--- cloudinit/sources/DataSourceAzure.py | 46 +++++++++++++++------------ tests/unittests/test_datasource/test_azure.py | 29 ++++++++++++++--- 2 files changed, 49 insertions(+), 26 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index bd3c742b..b93357d5 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -260,7 +260,6 @@ class WALinuxAgentShim(object): def __init__(self): self.endpoint = find_endpoint() - self.goal_state = None self.openssl_manager = OpenSSLManager() self.http_client = AzureEndpointHttpClient( self.openssl_manager.certificate) @@ -276,18 +275,24 @@ class WALinuxAgentShim(object): time.sleep(i + 1) else: break - self.goal_state = GoalState(response.contents, self.http_client) - self.public_keys = [] - if self.goal_state.certificates_xml is not None: - self.public_keys = self.openssl_manager.parse_certificates( - self.goal_state.certificates_xml) - self._report_ready() - - def _report_ready(self): + goal_state = GoalState(response.contents, self.http_client) + public_keys = [] + if goal_state.certificates_xml is not None: + public_keys = self.openssl_manager.parse_certificates( + goal_state.certificates_xml) + data = { + 'instance-id': iid_from_shared_config_content( + goal_state.shared_config_xml), + 'public-keys': public_keys, + } + self._report_ready(goal_state) + return data + + def _report_ready(self, goal_state): document = REPORT_READY_XML_TEMPLATE.format( - incarnation=self.goal_state.incarnation, - container_id=self.goal_state.container_id, - instance_id=self.goal_state.instance_id, + incarnation=goal_state.incarnation, + container_id=goal_state.container_id, + instance_id=goal_state.instance_id, ) self.http_client.post( "http://{}/machine?comp=health".format(self.endpoint), @@ -414,17 +419,16 @@ class DataSourceAzureNet(sources.DataSource): # the directory to be protected. 
write_files(ddir, files, dirmode=0o700) - shim = WALinuxAgentShim() - shim.register_with_azure_and_fetch_data() - try: - self.metadata['instance-id'] = iid_from_shared_config_content( - shim.goal_state.shared_config_xml) - except ValueError as e: - LOG.warn( - "failed to get instance id in %s: %s", shim.shared_config, e) + shim = WALinuxAgentShim() + data = shim.register_with_azure_and_fetch_data() + except Exception as exc: + LOG.info("Error communicating with Azure fabric; assume we aren't" + " on Azure.", exc_info=True) + return False - self.metadata['public-keys'] = shim.public_keys + self.metadata['instance-id'] = data['instance-id'] + self.metadata['public-keys'] = data['public-keys'] found_ephemeral = find_ephemeral_disk() if found_ephemeral: diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index dc7f2663..fd5b24f8 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -160,6 +160,12 @@ class TestAzureDataSource(TestCase): mod = DataSourceAzure mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d + fake_shim = mock.MagicMock() + fake_shim().register_with_azure_and_fetch_data.return_value = { + 'instance-id': 'i-my-azure-id', + 'public-keys': [], + } + self.apply_patches([ (mod, 'list_possible_azure_ds_devs', dsdevs), (mod, 'invoke_agent', _invoke_agent), @@ -169,7 +175,8 @@ class TestAzureDataSource(TestCase): (mod, 'perform_hostname_bounce', mock.MagicMock()), (mod, 'get_hostname', mock.MagicMock()), (mod, 'set_hostname', mock.MagicMock()), - ]) + (mod, 'WALinuxAgentShim', fake_shim), + ]) dsrc = mod.DataSourceAzureNet( data.get('sys_cfg', {}), distro=None, paths=self.paths) @@ -852,6 +859,9 @@ class TestWALinuxAgentShim(TestCase): mock.patch.object(DataSourceAzure, 'find_endpoint')) self.GoalState = patches.enter_context( mock.patch.object(DataSourceAzure, 'GoalState')) + self.iid_from_shared_config_content = patches.enter_context( + mock.patch.object(DataSourceAzure, + 'iid_from_shared_config_content')) self.OpenSSLManager = patches.enter_context( mock.patch.object(DataSourceAzure, 'OpenSSLManager')) @@ -877,19 +887,28 @@ class TestWALinuxAgentShim(TestCase): def test_certificates_used_to_determine_public_keys(self): shim = DataSourceAzure.WALinuxAgentShim() - shim.register_with_azure_and_fetch_data() + data = shim.register_with_azure_and_fetch_data() self.assertEqual( [mock.call(self.GoalState.return_value.certificates_xml)], self.OpenSSLManager.return_value.parse_certificates.call_args_list) self.assertEqual( self.OpenSSLManager.return_value.parse_certificates.return_value, - shim.public_keys) + data['public-keys']) def test_absent_certificates_produces_empty_public_keys(self): self.GoalState.return_value.certificates_xml = None shim = DataSourceAzure.WALinuxAgentShim() - shim.register_with_azure_and_fetch_data() - self.assertEqual([], shim.public_keys) + data = shim.register_with_azure_and_fetch_data() + self.assertEqual([], data['public-keys']) + + def test_instance_id_returned_in_data(self): + shim = DataSourceAzure.WALinuxAgentShim() + data = shim.register_with_azure_and_fetch_data() + self.assertEqual( + [mock.call(self.GoalState.return_value.shared_config_xml)], + self.iid_from_shared_config_content.call_args_list) + self.assertEqual(self.iid_from_shared_config_content.return_value, + data['instance-id']) def test_correct_url_used_for_report_ready(self): self.find_endpoint.return_value = 'test_endpoint' -- cgit v1.2.3 From 
28e9e693942d758fb5bdc952c32542c77e16f23a Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 8 May 2015 12:58:20 +0100 Subject: Add logging. --- cloudinit/sources/DataSourceAzure.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index b93357d5..deffd9b2 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -127,6 +127,7 @@ class AzureEndpointHttpClient(object): def find_endpoint(): + LOG.debug('Finding Azure endpoint...') content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases') value = None for line in content.splitlines(): @@ -143,7 +144,9 @@ def find_endpoint(): value = struct.pack('>L', int(hex_string.replace(':', ''), 16)) else: value = value.encode('utf-8') - return socket.inet_ntoa(value) + endpoint_ip_address = socket.inet_ntoa(value) + LOG.debug('Azure endpoint found at %s', endpoint_ip_address) + return endpoint_ip_address class GoalState(object): @@ -199,7 +202,9 @@ class OpenSSLManager(object): self.generate_certificate() def generate_certificate(self): + LOG.debug('Generating certificate for communication with fabric...') if self.certificate is not None: + LOG.debug('Certificate already generated.') return with cd(self.tmpdir.name): util.subp([ @@ -213,6 +218,7 @@ class OpenSSLManager(object): if "CERTIFICATE" not in line: certificate += line.rstrip() self.certificate = certificate + LOG.debug('New certificate generated.') def parse_certificates(self, certificates_xml): tag = ElementTree.fromstring(certificates_xml).find( @@ -259,6 +265,7 @@ class OpenSSLManager(object): class WALinuxAgentShim(object): def __init__(self): + LOG.debug('WALinuxAgentShim instantiated...') self.endpoint = find_endpoint() self.openssl_manager = OpenSSLManager() self.http_client = AzureEndpointHttpClient( @@ -275,9 +282,11 @@ class WALinuxAgentShim(object): time.sleep(i + 1) else: break + LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, self.http_client) public_keys = [] if goal_state.certificates_xml is not None: + LOG.debug('Certificate XML found; parsing out public keys.') public_keys = self.openssl_manager.parse_certificates( goal_state.certificates_xml) data = { @@ -289,6 +298,7 @@ class WALinuxAgentShim(object): return data def _report_ready(self, goal_state): + LOG.debug('Reporting ready to Azure fabric.') document = REPORT_READY_XML_TEMPLATE.format( incarnation=goal_state.incarnation, container_id=goal_state.container_id, @@ -299,6 +309,7 @@ class WALinuxAgentShim(object): data=document, extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, ) + LOG.info('Reported ready to Azure fabric.') def get_hostname(hostname_command='hostname'): -- cgit v1.2.3 From 4a2b6ef37578b13d7240dc1447bbb715b8a0a077 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 8 May 2015 12:58:20 +0100 Subject: Cache certificate response to save on communication with fabric. 
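The pattern applied below is a lazily populated, memoised property: the first
access fetches the certificates over HTTP and stores the result, and every later
access returns the stored copy. A standalone sketch of the same idea, assuming
only the secure-GET call shape from this patch (the class itself is illustrative,
not the real GoalState):

    class CachedCertificates(object):
        def __init__(self, http_client, url):
            self._http_client = http_client
            self._url = url
            self._contents = None  # nothing fetched yet

        @property
        def contents(self):
            # Only the first access hits the endpoint; a missing URL
            # (no certificates advertised) keeps returning None.
            if self._contents is None and self._url is not None:
                self._contents = self._http_client.get(
                    self._url, secure=True).contents
            return self._contents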
---
 cloudinit/sources/DataSourceAzure.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index deffd9b2..c783732d 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -154,6 +154,7 @@ class GoalState(object):
     def __init__(self, xml, http_client):
         self.http_client = http_client
         self.root = ElementTree.fromstring(xml)
+        self._certificates_xml = None
 
     def _text_from_xpath(self, xpath):
         element = self.root.find(xpath)
@@ -182,11 +183,14 @@ class GoalState(object):
 
     @property
     def certificates_xml(self):
-        url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance'
-                                    '/Configuration/Certificates')
-        if url is not None:
-            return self.http_client.get(url, secure=True).contents
-        return None
+        if self._certificates_xml is None:
+            url = self._text_from_xpath(
+                './Container/RoleInstanceList/RoleInstance'
+                '/Configuration/Certificates')
+            if url is not None:
+                self._certificates_xml = self.http_client.get(
+                    url, secure=True).contents
+        return self._certificates_xml
-- cgit v1.2.3


From 7ca682408f857fcfd04bfc026ea6c697c1fd4b86 Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Fri, 8 May 2015 12:59:57 +0100
Subject: Make find_endpoint a staticmethod to clean up top-level namespace.

---
 cloudinit/sources/DataSourceAzure.py          | 84 ++++++++++++++------------
 tests/unittests/test_datasource/test_azure.py | 21 ++++---
 2 files changed, 57 insertions(+), 48 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index c783732d..ba4afa5f 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -70,22 +70,6 @@ BUILTIN_CLOUD_CONFIG = {
 DS_CFG_PATH = ['datasource', DS_NAME]
 DEF_EPHEMERAL_LABEL = 'Temporary Storage'
 
-REPORT_READY_XML_TEMPLATE = """\
-<?xml version="1.0" encoding="utf-8"?>
-<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-        xmlns:xsd="http://www.w3.org/2001/XMLSchema">
-  <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>
-  <Container>
-    <ContainerId>{container_id}</ContainerId>
-    <RoleInstanceList>
-      <Role>
-        <InstanceId>{instance_id}</InstanceId>
-        <Health>
-          <State>Ready</State>
-        </Health>
-      </Role>
-    </RoleInstanceList>
-  </Container>
-</Health>
-"""
 
 
 @contextmanager
@@ -126,29 +110,6 @@ class AzureEndpointHttpClient(object):
         return util.read_file_or_url(url, data=data, headers=headers)
 
 
-def find_endpoint():
-    LOG.debug('Finding Azure endpoint...')
-    content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
-    value = None
-    for line in content.splitlines():
-        if 'unknown-245' in line:
-            value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
-    if value is None:
-        raise Exception('No endpoint found in DHCP config.')
-    if ':' in value:
-        hex_string = ''
-        for hex_pair in value.split(':'):
-            if len(hex_pair) == 1:
-                hex_pair = '0' + hex_pair
-            hex_string += hex_pair
-        value = struct.pack('>L', int(hex_string.replace(':', ''), 16))
-    else:
-        value = value.encode('utf-8')
-    endpoint_ip_address = socket.inet_ntoa(value)
-    LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
-    return endpoint_ip_address
 
 
 class GoalState(object):
@@ -268,14 +229,55 @@ class OpenSSLManager(object):
 
 class WALinuxAgentShim(object):
 
+    REPORT_READY_XML_TEMPLATE = '\n'.join([
+        '<?xml version="1.0" encoding="utf-8"?>',
+        '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
+        ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
+        '  <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
+        '  <Container>',
+        '    <ContainerId>{container_id}</ContainerId>',
+        '    <RoleInstanceList>',
+        '      <Role>',
+        '        <InstanceId>{instance_id}</InstanceId>',
+        '        <Health>',
+        '          <State>Ready</State>',
+        '        </Health>',
+        '      </Role>',
+        '    </RoleInstanceList>',
+        '  </Container>',
+        '</Health>'])
+
     def __init__(self):
         LOG.debug('WALinuxAgentShim instantiated...')
-        self.endpoint = find_endpoint()
+        self.endpoint = self.find_endpoint()
         self.openssl_manager = OpenSSLManager()
         self.http_client = AzureEndpointHttpClient(
            self.openssl_manager.certificate)
self.values = {} + @staticmethod + def find_endpoint(): + LOG.debug('Finding Azure endpoint...') + content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases') + value = None + for line in content.splitlines(): + if 'unknown-245' in line: + value = line.strip(' ').split(' ', 2)[-1].strip(';\n"') + if value is None: + raise Exception('No endpoint found in DHCP config.') + if ':' in value: + hex_string = '' + for hex_pair in value.split(':'): + if len(hex_pair) == 1: + hex_pair = '0' + hex_pair + hex_string += hex_pair + value = struct.pack('>L', int(hex_string.replace(':', ''), 16)) + else: + value = value.encode('utf-8') + endpoint_ip_address = socket.inet_ntoa(value) + LOG.debug('Azure endpoint found at %s', endpoint_ip_address) + return endpoint_ip_address + def register_with_azure_and_fetch_data(self): LOG.info('Registering with Azure...') for i in range(10): @@ -303,7 +305,7 @@ class WALinuxAgentShim(object): def _report_ready(self, goal_state): LOG.debug('Reporting ready to Azure fabric.') - document = REPORT_READY_XML_TEMPLATE.format( + document = self.REPORT_READY_XML_TEMPLATE.format( incarnation=goal_state.incarnation, container_id=goal_state.container_id, instance_id=goal_state.instance_id, diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index fd5b24f8..28703029 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -637,11 +637,13 @@ class TestFindEndpoint(TestCase): def test_missing_file(self): self.load_file.side_effect = IOError - self.assertRaises(IOError, DataSourceAzure.find_endpoint) + self.assertRaises(IOError, + DataSourceAzure.WALinuxAgentShim.find_endpoint) def test_missing_special_azure_line(self): self.load_file.return_value = '' - self.assertRaises(Exception, DataSourceAzure.find_endpoint) + self.assertRaises(Exception, + DataSourceAzure.WALinuxAgentShim.find_endpoint) def _build_lease_content(self, ip_address, use_hex=True): ip_address_repr = ':'.join( @@ -661,26 +663,30 @@ class TestFindEndpoint(TestCase): ip_address = '98.76.54.32' file_content = self._build_lease_content(ip_address) self.load_file.return_value = file_content - self.assertEqual(ip_address, DataSourceAzure.find_endpoint()) + self.assertEqual(ip_address, + DataSourceAzure.WALinuxAgentShim.find_endpoint()) def test_hex_string_with_single_character_part(self): ip_address = '4.3.2.1' file_content = self._build_lease_content(ip_address) self.load_file.return_value = file_content - self.assertEqual(ip_address, DataSourceAzure.find_endpoint()) + self.assertEqual(ip_address, + DataSourceAzure.WALinuxAgentShim.find_endpoint()) def test_packed_string(self): ip_address = '98.76.54.32' file_content = self._build_lease_content(ip_address, use_hex=False) self.load_file.return_value = file_content - self.assertEqual(ip_address, DataSourceAzure.find_endpoint()) + self.assertEqual(ip_address, + DataSourceAzure.WALinuxAgentShim.find_endpoint()) def test_latest_lease_used(self): ip_addresses = ['4.3.2.1', '98.76.54.32'] file_content = '\n'.join([self._build_lease_content(ip_address) for ip_address in ip_addresses]) self.load_file.return_value = file_content - self.assertEqual(ip_addresses[-1], DataSourceAzure.find_endpoint()) + self.assertEqual(ip_addresses[-1], + DataSourceAzure.WALinuxAgentShim.find_endpoint()) class TestGoalStateParsing(TestCase): @@ -856,7 +862,8 @@ class TestWALinuxAgentShim(TestCase): self.AzureEndpointHttpClient = patches.enter_context( mock.patch.object(DataSourceAzure, 
'AzureEndpointHttpClient'))
         self.find_endpoint = patches.enter_context(
-            mock.patch.object(DataSourceAzure, 'find_endpoint'))
+            mock.patch.object(
+                DataSourceAzure.WALinuxAgentShim, 'find_endpoint'))
         self.GoalState = patches.enter_context(
             mock.patch.object(DataSourceAzure, 'GoalState'))
         self.iid_from_shared_config_content = patches.enter_context(
-- cgit v1.2.3


From 917f1792e3f0fe2ae9411530217a1892d9bc6d1c Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Fri, 8 May 2015 13:00:06 +0100
Subject: Remove unused import.

---
 cloudinit/sources/DataSourceAzure.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index ba4afa5f..c2dc6b4c 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -17,7 +17,6 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 import base64
-import contextlib
 import crypt
-- cgit v1.2.3


From b9f26689e8b3bb7a3486771c6362107232a7dcf4 Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Fri, 8 May 2015 13:16:42 +0100
Subject: Split WALinuxAgentShim code out to separate file.

---
 cloudinit/sources/DataSourceAzure.py               | 271 +--------------
 cloudinit/sources/helpers/azure.py                 | 273 +++++++++++++++
 tests/unittests/test_datasource/test_azure.py      | 364 --------------------
 .../unittests/test_datasource/test_azure_helper.py | 377 +++++++++++++++++++++
 4 files changed, 653 insertions(+), 632 deletions(-)
 create mode 100644 cloudinit/sources/helpers/azure.py
 create mode 100644 tests/unittests/test_datasource/test_azure_helper.py

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index c2dc6b4c..5e147950 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -17,23 +17,19 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
import base64 +import contextlib import crypt import fnmatch import os import os.path -import re -import socket -import struct -import tempfile -import time -from contextlib import contextmanager from xml.dom import minidom -from xml.etree import ElementTree from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import sources from cloudinit import util +from cloudinit.sources.helpers.azure import ( + iid_from_shared_config_content, WALinuxAgentShim) LOG = logging.getLogger(__name__) @@ -70,253 +66,6 @@ DS_CFG_PATH = ['datasource', DS_NAME] DEF_EPHEMERAL_LABEL = 'Temporary Storage' - -@contextmanager -def cd(newdir): - prevdir = os.getcwd() - os.chdir(os.path.expanduser(newdir)) - try: - yield - finally: - os.chdir(prevdir) - - -class AzureEndpointHttpClient(object): - - headers = { - 'x-ms-agent-name': 'WALinuxAgent', - 'x-ms-version': '2012-11-30', - } - - def __init__(self, certificate): - self.extra_secure_headers = { - "x-ms-cipher-name": "DES_EDE3_CBC", - "x-ms-guest-agent-public-x509-cert": certificate, - } - - def get(self, url, secure=False): - headers = self.headers - if secure: - headers = self.headers.copy() - headers.update(self.extra_secure_headers) - return util.read_file_or_url(url, headers=headers) - - def post(self, url, data=None, extra_headers=None): - headers = self.headers - if extra_headers is not None: - headers = self.headers.copy() - headers.update(extra_headers) - return util.read_file_or_url(url, data=data, headers=headers) - - -class GoalState(object): - - def __init__(self, xml, http_client): - self.http_client = http_client - self.root = ElementTree.fromstring(xml) - self._certificates_xml = None - - def _text_from_xpath(self, xpath): - element = self.root.find(xpath) - if element is not None: - return element.text - return None - - @property - def container_id(self): - return self._text_from_xpath('./Container/ContainerId') - - @property - def incarnation(self): - return self._text_from_xpath('./Incarnation') - - @property - def instance_id(self): - return self._text_from_xpath( - './Container/RoleInstanceList/RoleInstance/InstanceId') - - @property - def shared_config_xml(self): - url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance' - '/Configuration/SharedConfig') - return self.http_client.get(url).contents - - @property - def certificates_xml(self): - if self._certificates_xml is None: - url = self._text_from_xpath( - './Container/RoleInstanceList/RoleInstance' - '/Configuration/Certificates') - if url is not None: - self._certificates_xml = self.http_client.get( - url, secure=True).contents - return self._certificates_xml - - -class OpenSSLManager(object): - - certificate_names = { - 'private_key': 'TransportPrivate.pem', - 'certificate': 'TransportCert.pem', - } - - def __init__(self): - self.tmpdir = tempfile.TemporaryDirectory() - self.certificate = None - self.generate_certificate() - - def generate_certificate(self): - LOG.debug('Generating certificate for communication with fabric...') - if self.certificate is not None: - LOG.debug('Certificate already generated.') - return - with cd(self.tmpdir.name): - util.subp([ - 'openssl', 'req', '-x509', '-nodes', '-subj', - '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048', - '-keyout', self.certificate_names['private_key'], - '-out', self.certificate_names['certificate'], - ]) - certificate = '' - for line in open(self.certificate_names['certificate']): - if "CERTIFICATE" not in line: - certificate += line.rstrip() - self.certificate = 
certificate
-        LOG.debug('New certificate generated.')
-
-    def parse_certificates(self, certificates_xml):
-        tag = ElementTree.fromstring(certificates_xml).find(
-            './/Data')
-        certificates_content = tag.text
-        lines = [
-            b'MIME-Version: 1.0',
-            b'Content-Disposition: attachment; filename="Certificates.p7m"',
-            b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
-            b'Content-Transfer-Encoding: base64',
-            b'',
-            certificates_content.encode('utf-8'),
-        ]
-        with cd(self.tmpdir.name):
-            with open('Certificates.p7m', 'wb') as f:
-                f.write(b'\n'.join(lines))
-            out, _ = util.subp(
-                'openssl cms -decrypt -in Certificates.p7m -inkey'
-                ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
-                ' -password pass:'.format(**self.certificate_names),
-                shell=True)
-        private_keys, certificates = [], []
-        current = []
-        for line in out.splitlines():
-            current.append(line)
-            if re.match(r'[-]+END .*?KEY[-]+$', line):
-                private_keys.append('\n'.join(current))
-                current = []
-            elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
-                certificates.append('\n'.join(current))
-                current = []
-        keys = []
-        for certificate in certificates:
-            with cd(self.tmpdir.name):
-                public_key, _ = util.subp(
-                    'openssl x509 -noout -pubkey |'
-                    'ssh-keygen -i -m PKCS8 -f /dev/stdin',
-                    data=certificate,
-                    shell=True)
-            keys.append(public_key)
-        return keys
-
-
-class WALinuxAgentShim(object):
-
-    REPORT_READY_XML_TEMPLATE = '\n'.join([
-        '<?xml version="1.0" encoding="utf-8"?>',
-        '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
-        ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
-        '  <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
-        '  <Container>',
-        '    <ContainerId>{container_id}</ContainerId>',
-        '    <RoleInstanceList>',
-        '      <Role>',
-        '        <InstanceId>{instance_id}</InstanceId>',
-        '        <Health>',
-        '          <State>Ready</State>',
-        '        </Health>',
-        '      </Role>',
-        '    </RoleInstanceList>',
-        '  </Container>',
-        '</Health>'])
-
-    def __init__(self):
-        LOG.debug('WALinuxAgentShim instantiated...')
-        self.endpoint = self.find_endpoint()
-        self.openssl_manager = OpenSSLManager()
-        self.http_client = AzureEndpointHttpClient(
-            self.openssl_manager.certificate)
-        self.values = {}
-
-    @staticmethod
-    def find_endpoint():
-        LOG.debug('Finding Azure endpoint...')
-        content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
-        value = None
-        for line in content.splitlines():
-            if 'unknown-245' in line:
-                value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
-        if value is None:
-            raise Exception('No endpoint found in DHCP config.')
-        if ':' in value:
-            hex_string = ''
-            for hex_pair in value.split(':'):
-                if len(hex_pair) == 1:
-                    hex_pair = '0' + hex_pair
-                hex_string += hex_pair
-            value = struct.pack('>L', int(hex_string.replace(':', ''), 16))
-        else:
-            value = value.encode('utf-8')
-        endpoint_ip_address = socket.inet_ntoa(value)
-        LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
-        return endpoint_ip_address
-
-    def register_with_azure_and_fetch_data(self):
-        LOG.info('Registering with Azure...')
-        for i in range(10):
-            try:
-                response = self.http_client.get(
-                    'http://{}/machine/?comp=goalstate'.format(self.endpoint))
-            except Exception:
-                time.sleep(i + 1)
-            else:
-                break
-        LOG.debug('Successfully fetched GoalState XML.')
-        goal_state = GoalState(response.contents, self.http_client)
-        public_keys = []
-        if goal_state.certificates_xml is not None:
-            LOG.debug('Certificate XML found; parsing out public keys.')
-            public_keys = self.openssl_manager.parse_certificates(
-                goal_state.certificates_xml)
-        data = {
-            'instance-id': iid_from_shared_config_content(
-                goal_state.shared_config_xml),
-            'public-keys': public_keys,
-        }
-        self._report_ready(goal_state)
-        return data
-
-    def _report_ready(self, goal_state):
-        LOG.debug('Reporting ready to Azure fabric.')
-        document = self.REPORT_READY_XML_TEMPLATE.format(
-            incarnation=goal_state.incarnation,
-
container_id=goal_state.container_id, - instance_id=goal_state.instance_id, - ) - self.http_client.post( - "http://{}/machine?comp=health".format(self.endpoint), - data=document, - extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, - ) - LOG.info('Reported ready to Azure fabric.') - - def get_hostname(hostname_command='hostname'): return util.subp(hostname_command, capture=True)[0].strip() @@ -690,20 +439,6 @@ def load_azure_ovf_pubkeys(sshnode): return found -def single_node_at_path(node, pathlist): - curnode = node - for tok in pathlist: - results = find_child(curnode, lambda n: n.localName == tok) - if len(results) == 0: - raise ValueError("missing %s token in %s" % (tok, str(pathlist))) - if len(results) > 1: - raise ValueError("found %s nodes of type %s looking for %s" % - (len(results), tok, str(pathlist))) - curnode = results[0] - - return curnode - - def read_azure_ovf(contents): try: dom = minidom.parseString(contents) diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py new file mode 100644 index 00000000..60f116e0 --- /dev/null +++ b/cloudinit/sources/helpers/azure.py @@ -0,0 +1,273 @@ +import logging +import os +import re +import socket +import struct +import tempfile +import time +from contextlib import contextmanager +from xml.etree import ElementTree + +from cloudinit import util + + +LOG = logging.getLogger(__name__) + + +@contextmanager +def cd(newdir): + prevdir = os.getcwd() + os.chdir(os.path.expanduser(newdir)) + try: + yield + finally: + os.chdir(prevdir) + + +class AzureEndpointHttpClient(object): + + headers = { + 'x-ms-agent-name': 'WALinuxAgent', + 'x-ms-version': '2012-11-30', + } + + def __init__(self, certificate): + self.extra_secure_headers = { + "x-ms-cipher-name": "DES_EDE3_CBC", + "x-ms-guest-agent-public-x509-cert": certificate, + } + + def get(self, url, secure=False): + headers = self.headers + if secure: + headers = self.headers.copy() + headers.update(self.extra_secure_headers) + return util.read_file_or_url(url, headers=headers) + + def post(self, url, data=None, extra_headers=None): + headers = self.headers + if extra_headers is not None: + headers = self.headers.copy() + headers.update(extra_headers) + return util.read_file_or_url(url, data=data, headers=headers) + + +class GoalState(object): + + def __init__(self, xml, http_client): + self.http_client = http_client + self.root = ElementTree.fromstring(xml) + self._certificates_xml = None + + def _text_from_xpath(self, xpath): + element = self.root.find(xpath) + if element is not None: + return element.text + return None + + @property + def container_id(self): + return self._text_from_xpath('./Container/ContainerId') + + @property + def incarnation(self): + return self._text_from_xpath('./Incarnation') + + @property + def instance_id(self): + return self._text_from_xpath( + './Container/RoleInstanceList/RoleInstance/InstanceId') + + @property + def shared_config_xml(self): + url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance' + '/Configuration/SharedConfig') + return self.http_client.get(url).contents + + @property + def certificates_xml(self): + if self._certificates_xml is None: + url = self._text_from_xpath( + './Container/RoleInstanceList/RoleInstance' + '/Configuration/Certificates') + if url is not None: + self._certificates_xml = self.http_client.get( + url, secure=True).contents + return self._certificates_xml + + +class OpenSSLManager(object): + + certificate_names = { + 'private_key': 'TransportPrivate.pem', + 'certificate': 
'TransportCert.pem',
+    }
+
+    def __init__(self):
+        self.tmpdir = tempfile.TemporaryDirectory()
+        self.certificate = None
+        self.generate_certificate()
+
+    def generate_certificate(self):
+        LOG.debug('Generating certificate for communication with fabric...')
+        if self.certificate is not None:
+            LOG.debug('Certificate already generated.')
+            return
+        with cd(self.tmpdir.name):
+            util.subp([
+                'openssl', 'req', '-x509', '-nodes', '-subj',
+                '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
+                '-keyout', self.certificate_names['private_key'],
+                '-out', self.certificate_names['certificate'],
+            ])
+            certificate = ''
+            for line in open(self.certificate_names['certificate']):
+                if "CERTIFICATE" not in line:
+                    certificate += line.rstrip()
+            self.certificate = certificate
+        LOG.debug('New certificate generated.')
+
+    def parse_certificates(self, certificates_xml):
+        tag = ElementTree.fromstring(certificates_xml).find(
+            './/Data')
+        certificates_content = tag.text
+        lines = [
+            b'MIME-Version: 1.0',
+            b'Content-Disposition: attachment; filename="Certificates.p7m"',
+            b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
+            b'Content-Transfer-Encoding: base64',
+            b'',
+            certificates_content.encode('utf-8'),
+        ]
+        with cd(self.tmpdir.name):
+            with open('Certificates.p7m', 'wb') as f:
+                f.write(b'\n'.join(lines))
+            out, _ = util.subp(
+                'openssl cms -decrypt -in Certificates.p7m -inkey'
+                ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
+                ' -password pass:'.format(**self.certificate_names),
+                shell=True)
+        private_keys, certificates = [], []
+        current = []
+        for line in out.splitlines():
+            current.append(line)
+            if re.match(r'[-]+END .*?KEY[-]+$', line):
+                private_keys.append('\n'.join(current))
+                current = []
+            elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
+                certificates.append('\n'.join(current))
+                current = []
+        keys = []
+        for certificate in certificates:
+            with cd(self.tmpdir.name):
+                public_key, _ = util.subp(
+                    'openssl x509 -noout -pubkey |'
+                    'ssh-keygen -i -m PKCS8 -f /dev/stdin',
+                    data=certificate,
+                    shell=True)
+            keys.append(public_key)
+        return keys
+
+
+def iid_from_shared_config_content(content):
+    """
+    find INSTANCE_ID in:
+    <?xml version="1.0" encoding="utf-8"?>
+    <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
+      <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0">
+        <Service name="..." guid="{00000000-0000-0000-0000-000000000000}" />
+    """
+    root = ElementTree.fromstring(content)
+    depnode = root.find('Deployment')
+    return depnode.get('name')
+
+
+class WALinuxAgentShim(object):
+
+    REPORT_READY_XML_TEMPLATE = '\n'.join([
+        '<?xml version="1.0" encoding="utf-8"?>',
+        '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
+        ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
+        '  <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
+        '  <Container>',
+        '    <ContainerId>{container_id}</ContainerId>',
+        '    <RoleInstanceList>',
+        '      <Role>',
+        '        <InstanceId>{instance_id}</InstanceId>',
+        '        <Health>',
+        '          <State>Ready</State>',
+        '        </Health>',
+        '      </Role>',
+        '    </RoleInstanceList>',
+        '  </Container>',
+        '</Health>'])
+
+    def __init__(self):
+        LOG.debug('WALinuxAgentShim instantiated...')
+        self.endpoint = self.find_endpoint()
+        self.openssl_manager = OpenSSLManager()
+        self.http_client = AzureEndpointHttpClient(
+            self.openssl_manager.certificate)
+        self.values = {}
+
+    @staticmethod
+    def find_endpoint():
+        LOG.debug('Finding Azure endpoint...')
+        content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
+        value = None
+        for line in content.splitlines():
+            if 'unknown-245' in line:
+                value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
+        if value is None:
+            raise Exception('No endpoint found in DHCP config.')
+        if ':' in value:
+            hex_string = ''
+            for hex_pair in value.split(':'):
+                if len(hex_pair) == 1:
+                    hex_pair = '0' + hex_pair
+                hex_string += hex_pair
+            value = struct.pack('>L', int(hex_string.replace(':', ''), 16))
+        else:
+            value = value.encode('utf-8')
+        endpoint_ip_address = socket.inet_ntoa(value)
+        LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
+        return
endpoint_ip_address
+
+    def register_with_azure_and_fetch_data(self):
+        LOG.info('Registering with Azure...')
+        for i in range(10):
+            try:
+                response = self.http_client.get(
+                    'http://{}/machine/?comp=goalstate'.format(self.endpoint))
+            except Exception:
+                time.sleep(i + 1)
+            else:
+                break
+        LOG.debug('Successfully fetched GoalState XML.')
+        goal_state = GoalState(response.contents, self.http_client)
+        public_keys = []
+        if goal_state.certificates_xml is not None:
+            LOG.debug('Certificate XML found; parsing out public keys.')
+            public_keys = self.openssl_manager.parse_certificates(
+                goal_state.certificates_xml)
+        data = {
+            'instance-id': iid_from_shared_config_content(
+                goal_state.shared_config_xml),
+            'public-keys': public_keys,
+        }
+        self._report_ready(goal_state)
+        return data
+
+    def _report_ready(self, goal_state):
+        LOG.debug('Reporting ready to Azure fabric.')
+        document = self.REPORT_READY_XML_TEMPLATE.format(
+            incarnation=goal_state.incarnation,
+            container_id=goal_state.container_id,
+            instance_id=goal_state.instance_id,
+        )
+        self.http_client.post(
+            "http://{}/machine?comp=health".format(self.endpoint),
+            data=document,
+            extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
+        )
+        LOG.info('Reported ready to Azure fabric.')
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 28703029..ee7109e1 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -15,47 +15,9 @@ except ImportError:
 import crypt
 import os
 import stat
-import struct
 import yaml
 import shutil
 import tempfile
-import unittest
-
-from cloudinit import url_helper
-
-
-GOAL_STATE_TEMPLATE = """\
-<?xml version="1.0" encoding="utf-8"?>
-<GoalState xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
-           xmlns:xsd="http://www.w3.org/2001/XMLSchema"
-           xsi:noNamespaceSchemaLocation="goalstate10.xsd">
-  <Version>2012-11-30</Version>
-  <Incarnation>{incarnation}</Incarnation>
-  <Machine>
-    <ExpectedState>Started</ExpectedState>
-    <StopRolesDeadlineHint>300000</StopRolesDeadlineHint>
-    <LBProbePorts>
-      <Port>16001</Port>
-    </LBProbePorts>
-    <ExpectHealthReport>FALSE</ExpectHealthReport>
-  </Machine>
-  <Container>
-    <ContainerId>{container_id}</ContainerId>
-    <RoleInstanceList>
-      <RoleInstance>
-        <InstanceId>{instance_id}</InstanceId>
-        <State>Started</State>
-        <Configuration>
-          <HostingEnvironmentConfig>
-            http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=hostingEnvironmentConfig&amp;incarnation=1
-          </HostingEnvironmentConfig>
-          <SharedConfig>{shared_config_url}</SharedConfig>
-          <ExtensionsConfig>
-            http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=extensionsConfig&amp;incarnation=1
-          </ExtensionsConfig>
-          <FullConfig>
-            http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=fullConfig&amp;incarnation=1
-          </FullConfig>
-          <Certificates>{certificates_url}</Certificates>
-          <ConfigName>68ce47b32ea94952be7b20951c383628.0.68ce47b32ea94952be7b20951c383628.0.utl-trusty--292258.1.xml</ConfigName>
-        </Configuration>
-      </RoleInstance>
-    </RoleInstanceList>
-  </Container>
-</GoalState>
-"""
 
 
 def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
@@ -610,329 +572,3 @@ class TestReadAzureOvf(TestCase):
         for mypk in mypklist:
             self.assertIn(mypk, cfg['_pubkeys'])
-
-class TestReadAzureSharedConfig(unittest.TestCase):
-    def test_valid_content(self):
-        xml = """<?xml version="1.0" encoding="utf-8"?>
-            <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
-             <Deployment name="MY_INSTANCE_ID" guid="{id}" incarnation="0">
-              <Service name="myservice" guid="{00000000-0000-0000-0000-000000000000}" />
-              <ServiceInstance name="MY_INSTANCE_ID.0" guid="{id}" />
-             </Deployment>
-            </SharedConfig>"""
-        ret = DataSourceAzure.iid_from_shared_config_content(xml)
-        self.assertEqual("MY_INSTANCE_ID", ret)
-
-
-class TestFindEndpoint(TestCase):
-
-    def setUp(self):
-        super(TestFindEndpoint, self).setUp()
-        patches = ExitStack()
-        self.addCleanup(patches.close)
-
-        self.load_file = patches.enter_context(
-            mock.patch.object(DataSourceAzure.util, 'load_file'))
-
-    def test_missing_file(self):
-        self.load_file.side_effect = IOError
-        self.assertRaises(IOError,
-                          DataSourceAzure.WALinuxAgentShim.find_endpoint)
-
-    def test_missing_special_azure_line(self):
-        self.load_file.return_value = ''
-        self.assertRaises(Exception,
-                          DataSourceAzure.WALinuxAgentShim.find_endpoint)
-
-    def _build_lease_content(self,
ip_address, use_hex=True): - ip_address_repr = ':'.join( - [hex(int(part)).replace('0x', '') - for part in ip_address.split('.')]) - if not use_hex: - ip_address_repr = struct.pack( - '>L', int(ip_address_repr.replace(':', ''), 16)) - ip_address_repr = '"{0}"'.format(ip_address_repr.decode('utf-8')) - return '\n'.join([ - 'lease {', - ' interface "eth0";', - ' option unknown-245 {0};'.format(ip_address_repr), - '}']) - - def test_hex_string(self): - ip_address = '98.76.54.32' - file_content = self._build_lease_content(ip_address) - self.load_file.return_value = file_content - self.assertEqual(ip_address, - DataSourceAzure.WALinuxAgentShim.find_endpoint()) - - def test_hex_string_with_single_character_part(self): - ip_address = '4.3.2.1' - file_content = self._build_lease_content(ip_address) - self.load_file.return_value = file_content - self.assertEqual(ip_address, - DataSourceAzure.WALinuxAgentShim.find_endpoint()) - - def test_packed_string(self): - ip_address = '98.76.54.32' - file_content = self._build_lease_content(ip_address, use_hex=False) - self.load_file.return_value = file_content - self.assertEqual(ip_address, - DataSourceAzure.WALinuxAgentShim.find_endpoint()) - - def test_latest_lease_used(self): - ip_addresses = ['4.3.2.1', '98.76.54.32'] - file_content = '\n'.join([self._build_lease_content(ip_address) - for ip_address in ip_addresses]) - self.load_file.return_value = file_content - self.assertEqual(ip_addresses[-1], - DataSourceAzure.WALinuxAgentShim.find_endpoint()) - - -class TestGoalStateParsing(TestCase): - - default_parameters = { - 'incarnation': 1, - 'container_id': 'MyContainerId', - 'instance_id': 'MyInstanceId', - 'shared_config_url': 'MySharedConfigUrl', - 'certificates_url': 'MyCertificatesUrl', - } - - def _get_goal_state(self, http_client=None, **kwargs): - if http_client is None: - http_client = mock.MagicMock() - parameters = self.default_parameters.copy() - parameters.update(kwargs) - xml = GOAL_STATE_TEMPLATE.format(**parameters) - if parameters['certificates_url'] is None: - new_xml_lines = [] - for line in xml.splitlines(): - if 'Certificates' in line: - continue - new_xml_lines.append(line) - xml = '\n'.join(new_xml_lines) - return DataSourceAzure.GoalState(xml, http_client) - - def test_incarnation_parsed_correctly(self): - incarnation = '123' - goal_state = self._get_goal_state(incarnation=incarnation) - self.assertEqual(incarnation, goal_state.incarnation) - - def test_container_id_parsed_correctly(self): - container_id = 'TestContainerId' - goal_state = self._get_goal_state(container_id=container_id) - self.assertEqual(container_id, goal_state.container_id) - - def test_instance_id_parsed_correctly(self): - instance_id = 'TestInstanceId' - goal_state = self._get_goal_state(instance_id=instance_id) - self.assertEqual(instance_id, goal_state.instance_id) - - def test_shared_config_xml_parsed_and_fetched_correctly(self): - http_client = mock.MagicMock() - shared_config_url = 'TestSharedConfigUrl' - goal_state = self._get_goal_state( - http_client=http_client, shared_config_url=shared_config_url) - shared_config_xml = goal_state.shared_config_xml - self.assertEqual(1, http_client.get.call_count) - self.assertEqual(shared_config_url, http_client.get.call_args[0][0]) - self.assertEqual(http_client.get.return_value.contents, - shared_config_xml) - - def test_certificates_xml_parsed_and_fetched_correctly(self): - http_client = mock.MagicMock() - certificates_url = 'TestSharedConfigUrl' - goal_state = self._get_goal_state( - http_client=http_client, 
certificates_url=certificates_url) - certificates_xml = goal_state.certificates_xml - self.assertEqual(1, http_client.get.call_count) - self.assertEqual(certificates_url, http_client.get.call_args[0][0]) - self.assertTrue(http_client.get.call_args[1].get('secure', False)) - self.assertEqual(http_client.get.return_value.contents, - certificates_xml) - - def test_missing_certificates_skips_http_get(self): - http_client = mock.MagicMock() - goal_state = self._get_goal_state( - http_client=http_client, certificates_url=None) - certificates_xml = goal_state.certificates_xml - self.assertEqual(0, http_client.get.call_count) - self.assertIsNone(certificates_xml) - - -class TestAzureEndpointHttpClient(TestCase): - - regular_headers = { - 'x-ms-agent-name': 'WALinuxAgent', - 'x-ms-version': '2012-11-30', - } - - def setUp(self): - super(TestAzureEndpointHttpClient, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.read_file_or_url = patches.enter_context( - mock.patch.object(DataSourceAzure.util, 'read_file_or_url')) - - def test_non_secure_get(self): - client = DataSourceAzure.AzureEndpointHttpClient(mock.MagicMock()) - url = 'MyTestUrl' - response = client.get(url, secure=False) - self.assertEqual(1, self.read_file_or_url.call_count) - self.assertEqual(self.read_file_or_url.return_value, response) - self.assertEqual(mock.call(url, headers=self.regular_headers), - self.read_file_or_url.call_args) - - def test_secure_get(self): - url = 'MyTestUrl' - certificate = mock.MagicMock() - expected_headers = self.regular_headers.copy() - expected_headers.update({ - "x-ms-cipher-name": "DES_EDE3_CBC", - "x-ms-guest-agent-public-x509-cert": certificate, - }) - client = DataSourceAzure.AzureEndpointHttpClient(certificate) - response = client.get(url, secure=True) - self.assertEqual(1, self.read_file_or_url.call_count) - self.assertEqual(self.read_file_or_url.return_value, response) - self.assertEqual(mock.call(url, headers=expected_headers), - self.read_file_or_url.call_args) - - def test_post(self): - data = mock.MagicMock() - url = 'MyTestUrl' - client = DataSourceAzure.AzureEndpointHttpClient(mock.MagicMock()) - response = client.post(url, data=data) - self.assertEqual(1, self.read_file_or_url.call_count) - self.assertEqual(self.read_file_or_url.return_value, response) - self.assertEqual( - mock.call(url, data=data, headers=self.regular_headers), - self.read_file_or_url.call_args) - - def test_post_with_extra_headers(self): - url = 'MyTestUrl' - client = DataSourceAzure.AzureEndpointHttpClient(mock.MagicMock()) - extra_headers = {'test': 'header'} - client.post(url, extra_headers=extra_headers) - self.assertEqual(1, self.read_file_or_url.call_count) - expected_headers = self.regular_headers.copy() - expected_headers.update(extra_headers) - self.assertEqual( - mock.call(mock.ANY, data=mock.ANY, headers=expected_headers), - self.read_file_or_url.call_args) - - -class TestOpenSSLManager(TestCase): - - def setUp(self): - super(TestOpenSSLManager, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.subp = patches.enter_context( - mock.patch.object(DataSourceAzure.util, 'subp')) - - @mock.patch.object(DataSourceAzure, 'cd', mock.MagicMock()) - @mock.patch.object(DataSourceAzure.tempfile, 'TemporaryDirectory') - def test_openssl_manager_creates_a_tmpdir(self, TemporaryDirectory): - manager = DataSourceAzure.OpenSSLManager() - self.assertEqual(TemporaryDirectory.return_value, manager.tmpdir) - - @mock.patch('builtins.open') - def 
test_generate_certificate_uses_tmpdir(self, open): - subp_directory = {} - - def capture_directory(*args, **kwargs): - subp_directory['path'] = os.getcwd() - - self.subp.side_effect = capture_directory - manager = DataSourceAzure.OpenSSLManager() - self.assertEqual(manager.tmpdir.name, subp_directory['path']) - - -class TestWALinuxAgentShim(TestCase): - - def setUp(self): - super(TestWALinuxAgentShim, self).setUp() - patches = ExitStack() - self.addCleanup(patches.close) - - self.AzureEndpointHttpClient = patches.enter_context( - mock.patch.object(DataSourceAzure, 'AzureEndpointHttpClient')) - self.find_endpoint = patches.enter_context( - mock.patch.object( - DataSourceAzure.WALinuxAgentShim, 'find_endpoint')) - self.GoalState = patches.enter_context( - mock.patch.object(DataSourceAzure, 'GoalState')) - self.iid_from_shared_config_content = patches.enter_context( - mock.patch.object(DataSourceAzure, - 'iid_from_shared_config_content')) - self.OpenSSLManager = patches.enter_context( - mock.patch.object(DataSourceAzure, 'OpenSSLManager')) - - def test_http_client_uses_certificate(self): - shim = DataSourceAzure.WALinuxAgentShim() - self.assertEqual( - [mock.call(self.OpenSSLManager.return_value.certificate)], - self.AzureEndpointHttpClient.call_args_list) - self.assertEqual(self.AzureEndpointHttpClient.return_value, - shim.http_client) - - def test_correct_url_used_for_goalstate(self): - self.find_endpoint.return_value = 'test_endpoint' - shim = DataSourceAzure.WALinuxAgentShim() - shim.register_with_azure_and_fetch_data() - get = self.AzureEndpointHttpClient.return_value.get - self.assertEqual( - [mock.call('http://test_endpoint/machine/?comp=goalstate')], - get.call_args_list) - self.assertEqual( - [mock.call(get.return_value.contents, shim.http_client)], - self.GoalState.call_args_list) - - def test_certificates_used_to_determine_public_keys(self): - shim = DataSourceAzure.WALinuxAgentShim() - data = shim.register_with_azure_and_fetch_data() - self.assertEqual( - [mock.call(self.GoalState.return_value.certificates_xml)], - self.OpenSSLManager.return_value.parse_certificates.call_args_list) - self.assertEqual( - self.OpenSSLManager.return_value.parse_certificates.return_value, - data['public-keys']) - - def test_absent_certificates_produces_empty_public_keys(self): - self.GoalState.return_value.certificates_xml = None - shim = DataSourceAzure.WALinuxAgentShim() - data = shim.register_with_azure_and_fetch_data() - self.assertEqual([], data['public-keys']) - - def test_instance_id_returned_in_data(self): - shim = DataSourceAzure.WALinuxAgentShim() - data = shim.register_with_azure_and_fetch_data() - self.assertEqual( - [mock.call(self.GoalState.return_value.shared_config_xml)], - self.iid_from_shared_config_content.call_args_list) - self.assertEqual(self.iid_from_shared_config_content.return_value, - data['instance-id']) - - def test_correct_url_used_for_report_ready(self): - self.find_endpoint.return_value = 'test_endpoint' - shim = DataSourceAzure.WALinuxAgentShim() - shim.register_with_azure_and_fetch_data() - expected_url = 'http://test_endpoint/machine?comp=health' - self.assertEqual( - [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], - shim.http_client.post.call_args_list) - - def test_goal_state_values_used_for_report_ready(self): - self.GoalState.return_value.incarnation = 'TestIncarnation' - self.GoalState.return_value.container_id = 'TestContainerId' - self.GoalState.return_value.instance_id = 'TestInstanceId' - shim = DataSourceAzure.WALinuxAgentShim() - 
shim.register_with_azure_and_fetch_data()
-        posted_document = shim.http_client.post.call_args[1]['data']
-        self.assertIn('TestIncarnation', posted_document)
-        self.assertIn('TestContainerId', posted_document)
-        self.assertIn('TestInstanceId', posted_document)
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
new file mode 100644
index 00000000..47b77840
--- /dev/null
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -0,0 +1,377 @@
+import os
+import struct
+import unittest
+
+from cloudinit.sources.helpers import azure as azure_helper
+from ..helpers import TestCase
+
+try:
+    from unittest import mock
+except ImportError:
+    import mock
+
+try:
+    from contextlib import ExitStack
+except ImportError:
+    from contextlib2 import ExitStack
+
+
+GOAL_STATE_TEMPLATE = """\
+<?xml version="1.0" encoding="utf-8"?>
+<GoalState xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema">
+  <Version>2012-11-30</Version>
+  <Incarnation>{incarnation}</Incarnation>
+  <Machine>
+    <ExpectedState>Started</ExpectedState>
+    <StopRolesDeadlineHint>300000</StopRolesDeadlineHint>
+    <LBProbePorts>
+      <Port>16001</Port>
+    </LBProbePorts>
+    <ExpectHealthReport>FALSE</ExpectHealthReport>
+  </Machine>
+  <Container>
+    <ContainerId>{container_id}</ContainerId>
+    <RoleInstanceList>
+      <RoleInstance>
+        <InstanceId>{instance_id}</InstanceId>
+        <State>Started</State>
+        <Configuration>
+          <HostingEnvironmentConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=hostingEnvironmentConfig&amp;incarnation=1</HostingEnvironmentConfig>
+          <SharedConfig>{shared_config_url}</SharedConfig>
+          <ExtensionsConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=extensionsConfig&amp;incarnation=1</ExtensionsConfig>
+          <FullConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=fullConfig&amp;incarnation=1</FullConfig>
+          <Certificates>{certificates_url}</Certificates>
+          <ConfigName>68ce47b32ea94952be7b20951c383628.0.68ce47b32ea94952be7b20951c383628.0.utl-trusty--292258.1.xml</ConfigName>
+        </Configuration>
+      </RoleInstance>
+    </RoleInstanceList>
+  </Container>
+</GoalState>
+"""
+
+
+class TestReadAzureSharedConfig(unittest.TestCase):
+
+    def test_valid_content(self):
+        xml = """<?xml version="1.0" encoding="utf-8"?>
+            <SharedConfig>
+             <Deployment name="MY_INSTANCE_ID">
+              <Service name="myservice" />
+              <ServiceInstance name="INSTANCE_ID.0" guid="{00000000-0000-0000-0000-000000000000}" />
+             </Deployment>
+            </SharedConfig>"""
+        ret = azure_helper.iid_from_shared_config_content(xml)
+        self.assertEqual("MY_INSTANCE_ID", ret)
+
+
+class TestFindEndpoint(TestCase):
+
+    def setUp(self):
+        super(TestFindEndpoint, self).setUp()
+        patches = ExitStack()
+        self.addCleanup(patches.close)
+
+        self.load_file = patches.enter_context(
+            mock.patch.object(azure_helper.util, 'load_file'))
+
+    def test_missing_file(self):
+        self.load_file.side_effect = IOError
+        self.assertRaises(IOError,
+                          azure_helper.WALinuxAgentShim.find_endpoint)
+
+    def test_missing_special_azure_line(self):
+        self.load_file.return_value = ''
+        self.assertRaises(Exception,
+                          azure_helper.WALinuxAgentShim.find_endpoint)
+
+    def _build_lease_content(self, ip_address, use_hex=True):
+        ip_address_repr = ':'.join(
+            [hex(int(part)).replace('0x', '')
+             for part in ip_address.split('.')])
+        if not use_hex:
+            ip_address_repr = struct.pack(
+                '>L', int(ip_address_repr.replace(':', ''), 16))
+            ip_address_repr = '"{0}"'.format(ip_address_repr.decode('utf-8'))
+        return '\n'.join([
+            'lease {',
+            ' interface "eth0";',
+            ' option unknown-245 {0};'.format(ip_address_repr),
+            '}'])
+
+    def test_hex_string(self):
+        ip_address = '98.76.54.32'
+        file_content = self._build_lease_content(ip_address)
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_address,
+                         azure_helper.WALinuxAgentShim.find_endpoint())
+
+    def test_hex_string_with_single_character_part(self):
+        ip_address = '4.3.2.1'
+        file_content = self._build_lease_content(ip_address)
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_address,
+                         azure_helper.WALinuxAgentShim.find_endpoint())
+
+    def test_packed_string(self):
+        ip_address = '98.76.54.32'
+        file_content = self._build_lease_content(ip_address, use_hex=False)
+
self.load_file.return_value = file_content + self.assertEqual(ip_address, + azure_helper.WALinuxAgentShim.find_endpoint()) + + def test_latest_lease_used(self): + ip_addresses = ['4.3.2.1', '98.76.54.32'] + file_content = '\n'.join([self._build_lease_content(ip_address) + for ip_address in ip_addresses]) + self.load_file.return_value = file_content + self.assertEqual(ip_addresses[-1], + azure_helper.WALinuxAgentShim.find_endpoint()) + + +class TestGoalStateParsing(TestCase): + + default_parameters = { + 'incarnation': 1, + 'container_id': 'MyContainerId', + 'instance_id': 'MyInstanceId', + 'shared_config_url': 'MySharedConfigUrl', + 'certificates_url': 'MyCertificatesUrl', + } + + def _get_goal_state(self, http_client=None, **kwargs): + if http_client is None: + http_client = mock.MagicMock() + parameters = self.default_parameters.copy() + parameters.update(kwargs) + xml = GOAL_STATE_TEMPLATE.format(**parameters) + if parameters['certificates_url'] is None: + new_xml_lines = [] + for line in xml.splitlines(): + if 'Certificates' in line: + continue + new_xml_lines.append(line) + xml = '\n'.join(new_xml_lines) + return azure_helper.GoalState(xml, http_client) + + def test_incarnation_parsed_correctly(self): + incarnation = '123' + goal_state = self._get_goal_state(incarnation=incarnation) + self.assertEqual(incarnation, goal_state.incarnation) + + def test_container_id_parsed_correctly(self): + container_id = 'TestContainerId' + goal_state = self._get_goal_state(container_id=container_id) + self.assertEqual(container_id, goal_state.container_id) + + def test_instance_id_parsed_correctly(self): + instance_id = 'TestInstanceId' + goal_state = self._get_goal_state(instance_id=instance_id) + self.assertEqual(instance_id, goal_state.instance_id) + + def test_shared_config_xml_parsed_and_fetched_correctly(self): + http_client = mock.MagicMock() + shared_config_url = 'TestSharedConfigUrl' + goal_state = self._get_goal_state( + http_client=http_client, shared_config_url=shared_config_url) + shared_config_xml = goal_state.shared_config_xml + self.assertEqual(1, http_client.get.call_count) + self.assertEqual(shared_config_url, http_client.get.call_args[0][0]) + self.assertEqual(http_client.get.return_value.contents, + shared_config_xml) + + def test_certificates_xml_parsed_and_fetched_correctly(self): + http_client = mock.MagicMock() + certificates_url = 'TestSharedConfigUrl' + goal_state = self._get_goal_state( + http_client=http_client, certificates_url=certificates_url) + certificates_xml = goal_state.certificates_xml + self.assertEqual(1, http_client.get.call_count) + self.assertEqual(certificates_url, http_client.get.call_args[0][0]) + self.assertTrue(http_client.get.call_args[1].get('secure', False)) + self.assertEqual(http_client.get.return_value.contents, + certificates_xml) + + def test_missing_certificates_skips_http_get(self): + http_client = mock.MagicMock() + goal_state = self._get_goal_state( + http_client=http_client, certificates_url=None) + certificates_xml = goal_state.certificates_xml + self.assertEqual(0, http_client.get.call_count) + self.assertIsNone(certificates_xml) + + +class TestAzureEndpointHttpClient(TestCase): + + regular_headers = { + 'x-ms-agent-name': 'WALinuxAgent', + 'x-ms-version': '2012-11-30', + } + + def setUp(self): + super(TestAzureEndpointHttpClient, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.read_file_or_url = patches.enter_context( + mock.patch.object(azure_helper.util, 'read_file_or_url')) + + def 
test_non_secure_get(self): + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + url = 'MyTestUrl' + response = client.get(url, secure=False) + self.assertEqual(1, self.read_file_or_url.call_count) + self.assertEqual(self.read_file_or_url.return_value, response) + self.assertEqual(mock.call(url, headers=self.regular_headers), + self.read_file_or_url.call_args) + + def test_secure_get(self): + url = 'MyTestUrl' + certificate = mock.MagicMock() + expected_headers = self.regular_headers.copy() + expected_headers.update({ + "x-ms-cipher-name": "DES_EDE3_CBC", + "x-ms-guest-agent-public-x509-cert": certificate, + }) + client = azure_helper.AzureEndpointHttpClient(certificate) + response = client.get(url, secure=True) + self.assertEqual(1, self.read_file_or_url.call_count) + self.assertEqual(self.read_file_or_url.return_value, response) + self.assertEqual(mock.call(url, headers=expected_headers), + self.read_file_or_url.call_args) + + def test_post(self): + data = mock.MagicMock() + url = 'MyTestUrl' + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + response = client.post(url, data=data) + self.assertEqual(1, self.read_file_or_url.call_count) + self.assertEqual(self.read_file_or_url.return_value, response) + self.assertEqual( + mock.call(url, data=data, headers=self.regular_headers), + self.read_file_or_url.call_args) + + def test_post_with_extra_headers(self): + url = 'MyTestUrl' + client = azure_helper.AzureEndpointHttpClient(mock.MagicMock()) + extra_headers = {'test': 'header'} + client.post(url, extra_headers=extra_headers) + self.assertEqual(1, self.read_file_or_url.call_count) + expected_headers = self.regular_headers.copy() + expected_headers.update(extra_headers) + self.assertEqual( + mock.call(mock.ANY, data=mock.ANY, headers=expected_headers), + self.read_file_or_url.call_args) + + +class TestOpenSSLManager(TestCase): + + def setUp(self): + super(TestOpenSSLManager, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.subp = patches.enter_context( + mock.patch.object(azure_helper.util, 'subp')) + + @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) + @mock.patch.object(azure_helper.tempfile, 'TemporaryDirectory') + def test_openssl_manager_creates_a_tmpdir(self, TemporaryDirectory): + manager = azure_helper.OpenSSLManager() + self.assertEqual(TemporaryDirectory.return_value, manager.tmpdir) + + @mock.patch('builtins.open') + def test_generate_certificate_uses_tmpdir(self, open): + subp_directory = {} + + def capture_directory(*args, **kwargs): + subp_directory['path'] = os.getcwd() + + self.subp.side_effect = capture_directory + manager = azure_helper.OpenSSLManager() + self.assertEqual(manager.tmpdir.name, subp_directory['path']) + + +class TestWALinuxAgentShim(TestCase): + + def setUp(self): + super(TestWALinuxAgentShim, self).setUp() + patches = ExitStack() + self.addCleanup(patches.close) + + self.AzureEndpointHttpClient = patches.enter_context( + mock.patch.object(azure_helper, 'AzureEndpointHttpClient')) + self.find_endpoint = patches.enter_context( + mock.patch.object( + azure_helper.WALinuxAgentShim, 'find_endpoint')) + self.GoalState = patches.enter_context( + mock.patch.object(azure_helper, 'GoalState')) + self.iid_from_shared_config_content = patches.enter_context( + mock.patch.object(azure_helper, 'iid_from_shared_config_content')) + self.OpenSSLManager = patches.enter_context( + mock.patch.object(azure_helper, 'OpenSSLManager')) + + def test_http_client_uses_certificate(self): + shim = 
azure_helper.WALinuxAgentShim() + self.assertEqual( + [mock.call(self.OpenSSLManager.return_value.certificate)], + self.AzureEndpointHttpClient.call_args_list) + self.assertEqual(self.AzureEndpointHttpClient.return_value, + shim.http_client) + + def test_correct_url_used_for_goalstate(self): + self.find_endpoint.return_value = 'test_endpoint' + shim = azure_helper.WALinuxAgentShim() + shim.register_with_azure_and_fetch_data() + get = self.AzureEndpointHttpClient.return_value.get + self.assertEqual( + [mock.call('http://test_endpoint/machine/?comp=goalstate')], + get.call_args_list) + self.assertEqual( + [mock.call(get.return_value.contents, shim.http_client)], + self.GoalState.call_args_list) + + def test_certificates_used_to_determine_public_keys(self): + shim = azure_helper.WALinuxAgentShim() + data = shim.register_with_azure_and_fetch_data() + self.assertEqual( + [mock.call(self.GoalState.return_value.certificates_xml)], + self.OpenSSLManager.return_value.parse_certificates.call_args_list) + self.assertEqual( + self.OpenSSLManager.return_value.parse_certificates.return_value, + data['public-keys']) + + def test_absent_certificates_produces_empty_public_keys(self): + self.GoalState.return_value.certificates_xml = None + shim = azure_helper.WALinuxAgentShim() + data = shim.register_with_azure_and_fetch_data() + self.assertEqual([], data['public-keys']) + + def test_instance_id_returned_in_data(self): + shim = azure_helper.WALinuxAgentShim() + data = shim.register_with_azure_and_fetch_data() + self.assertEqual( + [mock.call(self.GoalState.return_value.shared_config_xml)], + self.iid_from_shared_config_content.call_args_list) + self.assertEqual(self.iid_from_shared_config_content.return_value, + data['instance-id']) + + def test_correct_url_used_for_report_ready(self): + self.find_endpoint.return_value = 'test_endpoint' + shim = azure_helper.WALinuxAgentShim() + shim.register_with_azure_and_fetch_data() + expected_url = 'http://test_endpoint/machine?comp=health' + self.assertEqual( + [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], + shim.http_client.post.call_args_list) + + def test_goal_state_values_used_for_report_ready(self): + self.GoalState.return_value.incarnation = 'TestIncarnation' + self.GoalState.return_value.container_id = 'TestContainerId' + self.GoalState.return_value.instance_id = 'TestInstanceId' + shim = azure_helper.WALinuxAgentShim() + shim.register_with_azure_and_fetch_data() + posted_document = shim.http_client.post.call_args[1]['data'] + self.assertIn('TestIncarnation', posted_document) + self.assertIn('TestContainerId', posted_document) + self.assertIn('TestInstanceId', posted_document) -- cgit v1.2.3 From 9c7643c4a0dee7843963709c361b755baf843a4b Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 8 May 2015 13:16:44 +0100 Subject: Stop using Python 3 only tempfile.TemporaryDirectory (but lose free cleanup). 
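tempfile.TemporaryDirectory only exists on Python 3.2 and later, while
tempfile.mkdtemp is available on both major versions but leaves removal to
the caller. A minimal sketch of the portable pattern (illustrative only; a
later commit in this series does the cleanup with util.del_dir):

    import shutil
    import tempfile

    tmpdir = tempfile.mkdtemp()  # works on Python 2 and 3
    try:
        pass  # ... work inside tmpdir ...
    finally:
        shutil.rmtree(tmpdir)  # the cleanup TemporaryDirectory did for free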
--- cloudinit/sources/helpers/azure.py | 8 ++++---- tests/unittests/test_datasource/test_azure_helper.py | 17 +++++++++++------ 2 files changed, 15 insertions(+), 10 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 60f116e0..cb13187f 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -104,7 +104,7 @@ class OpenSSLManager(object): } def __init__(self): - self.tmpdir = tempfile.TemporaryDirectory() + self.tmpdir = tempfile.mkdtemp() self.certificate = None self.generate_certificate() @@ -113,7 +113,7 @@ class OpenSSLManager(object): if self.certificate is not None: LOG.debug('Certificate already generated.') return - with cd(self.tmpdir.name): + with cd(self.tmpdir): util.subp([ 'openssl', 'req', '-x509', '-nodes', '-subj', '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048', @@ -139,7 +139,7 @@ class OpenSSLManager(object): b'', certificates_content.encode('utf-8'), ] - with cd(self.tmpdir.name): + with cd(self.tmpdir): with open('Certificates.p7m', 'wb') as f: f.write(b'\n'.join(lines)) out, _ = util.subp( @@ -159,7 +159,7 @@ class OpenSSLManager(object): current = [] keys = [] for certificate in certificates: - with cd(self.tmpdir.name): + with cd(self.tmpdir): public_key, _ = util.subp( 'openssl x509 -noout -pubkey |' 'ssh-keygen -i -m PKCS8 -f /dev/stdin', diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 47b77840..398a9007 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -273,15 +273,20 @@ class TestOpenSSLManager(TestCase): self.subp = patches.enter_context( mock.patch.object(azure_helper.util, 'subp')) + try: + self.open = patches.enter_context( + mock.patch('__builtin__.open')) + except ImportError: + self.open = patches.enter_context( + mock.patch('builtins.open')) @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) - @mock.patch.object(azure_helper.tempfile, 'TemporaryDirectory') - def test_openssl_manager_creates_a_tmpdir(self, TemporaryDirectory): + @mock.patch.object(azure_helper.tempfile, 'mkdtemp') + def test_openssl_manager_creates_a_tmpdir(self, mkdtemp): manager = azure_helper.OpenSSLManager() - self.assertEqual(TemporaryDirectory.return_value, manager.tmpdir) + self.assertEqual(mkdtemp.return_value, manager.tmpdir) - @mock.patch('builtins.open') - def test_generate_certificate_uses_tmpdir(self, open): + def test_generate_certificate_uses_tmpdir(self): subp_directory = {} def capture_directory(*args, **kwargs): @@ -289,7 +294,7 @@ class TestOpenSSLManager(TestCase): self.subp.side_effect = capture_directory manager = azure_helper.OpenSSLManager() - self.assertEqual(manager.tmpdir.name, subp_directory['path']) + self.assertEqual(manager.tmpdir, subp_directory['path']) class TestWALinuxAgentShim(TestCase): -- cgit v1.2.3 From 84868622c404cda5efd2a753e2de30c1afca49a2 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 8 May 2015 13:18:02 +0100 Subject: Move our walinuxagent implementation to a single function call. 
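After this change, callers no longer construct and tear down a
WALinuxAgentShim themselves; the helper module exposes a single entry point
that builds the shim, registers with the fabric, fetches the metadata and
cleans up afterwards. A sketch of the intended calling convention:

    from cloudinit.sources.helpers.azure import get_metadata_from_fabric

    fabric_data = get_metadata_from_fabric()
    # e.g. {'instance-id': '...', 'public-keys': [...]}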
--- cloudinit/sources/DataSourceAzure.py | 8 ++-- cloudinit/sources/helpers/azure.py | 31 ++++++++---- tests/unittests/test_datasource/test_azure.py | 19 ++++++-- .../unittests/test_datasource/test_azure_helper.py | 56 ++++++++++++++++++++-- 4 files changed, 92 insertions(+), 22 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 5e147950..4053cfa6 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -29,7 +29,7 @@ from cloudinit.settings import PER_ALWAYS from cloudinit import sources from cloudinit import util from cloudinit.sources.helpers.azure import ( - iid_from_shared_config_content, WALinuxAgentShim) + get_metadata_from_fabric, iid_from_shared_config_content) LOG = logging.getLogger(__name__) @@ -185,15 +185,13 @@ class DataSourceAzureNet(sources.DataSource): write_files(ddir, files, dirmode=0o700) try: - shim = WALinuxAgentShim() - data = shim.register_with_azure_and_fetch_data() + fabric_data = get_metadata_from_fabric() except Exception as exc: LOG.info("Error communicating with Azure fabric; assume we aren't" " on Azure.", exc_info=True) return False - self.metadata['instance-id'] = data['instance-id'] - self.metadata['public-keys'] = data['public-keys'] + self.metadata.update(fabric_data) found_ephemeral = find_ephemeral_disk() if found_ephemeral: diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index cb13187f..dfdfa7c2 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -108,6 +108,9 @@ class OpenSSLManager(object): self.certificate = None self.generate_certificate() + def clean_up(self): + util.del_dir(self.tmpdir) + def generate_certificate(self): LOG.debug('Generating certificate for communication with fabric...') if self.certificate is not None: @@ -205,11 +208,13 @@ class WALinuxAgentShim(object): def __init__(self): LOG.debug('WALinuxAgentShim instantiated...') self.endpoint = self.find_endpoint() - self.openssl_manager = OpenSSLManager() - self.http_client = AzureEndpointHttpClient( - self.openssl_manager.certificate) + self.openssl_manager = None self.values = {} + def clean_up(self): + if self.openssl_manager is not None: + self.openssl_manager.clean_up() + @staticmethod def find_endpoint(): LOG.debug('Finding Azure endpoint...') @@ -234,17 +239,19 @@ class WALinuxAgentShim(object): return endpoint_ip_address def register_with_azure_and_fetch_data(self): + self.openssl_manager = OpenSSLManager() + http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) LOG.info('Registering with Azure...') for i in range(10): try: - response = self.http_client.get( + response = http_client.get( 'http://{}/machine/?comp=goalstate'.format(self.endpoint)) except Exception: time.sleep(i + 1) else: break LOG.debug('Successfully fetched GoalState XML.') - goal_state = GoalState(response.contents, self.http_client) + goal_state = GoalState(response.contents, http_client) public_keys = [] if goal_state.certificates_xml is not None: LOG.debug('Certificate XML found; parsing out public keys.') @@ -255,19 +262,27 @@ class WALinuxAgentShim(object): goal_state.shared_config_xml), 'public-keys': public_keys, } - self._report_ready(goal_state) + self._report_ready(goal_state, http_client) return data - def _report_ready(self, goal_state): + def _report_ready(self, goal_state, http_client): LOG.debug('Reporting ready to Azure fabric.') document = 
self.REPORT_READY_XML_TEMPLATE.format( incarnation=goal_state.incarnation, container_id=goal_state.container_id, instance_id=goal_state.instance_id, ) - self.http_client.post( + http_client.post( "http://{}/machine?comp=health".format(self.endpoint), data=document, extra_headers={'Content-Type': 'text/xml; charset=utf-8'}, ) LOG.info('Reported ready to Azure fabric.') + + +def get_metadata_from_fabric(): + shim = WALinuxAgentShim() + try: + return shim.register_with_azure_and_fetch_data() + finally: + shim.clean_up() diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index ee7109e1..983be4cd 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -122,11 +122,10 @@ class TestAzureDataSource(TestCase): mod = DataSourceAzure mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d - fake_shim = mock.MagicMock() - fake_shim().register_with_azure_and_fetch_data.return_value = { + self.get_metadata_from_fabric = mock.MagicMock(return_value={ 'instance-id': 'i-my-azure-id', 'public-keys': [], - } + }) self.apply_patches([ (mod, 'list_possible_azure_ds_devs', dsdevs), @@ -137,7 +136,7 @@ class TestAzureDataSource(TestCase): (mod, 'perform_hostname_bounce', mock.MagicMock()), (mod, 'get_hostname', mock.MagicMock()), (mod, 'set_hostname', mock.MagicMock()), - (mod, 'WALinuxAgentShim', fake_shim), + (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric), ]) dsrc = mod.DataSourceAzureNet( @@ -388,6 +387,18 @@ class TestAzureDataSource(TestCase): self.assertEqual(new_ovfenv, load_file(os.path.join(self.waagent_d, 'ovf-env.xml'))) + def test_exception_fetching_fabric_data_doesnt_propagate(self): + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + self.get_metadata_from_fabric.side_effect = Exception + self.assertFalse(ds.get_data()) + + def test_fabric_data_included_in_metadata(self): + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + self.get_metadata_from_fabric.return_value = {'test': 'value'} + ret = ds.get_data() + self.assertTrue(ret) + self.assertEqual('value', ds.metadata['test']) + class TestAzureBounce(TestCase): diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 398a9007..5fac2ade 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -296,6 +296,14 @@ class TestOpenSSLManager(TestCase): manager = azure_helper.OpenSSLManager() self.assertEqual(manager.tmpdir, subp_directory['path']) + @mock.patch.object(azure_helper, 'cd', mock.MagicMock()) + @mock.patch.object(azure_helper.tempfile, 'mkdtemp', mock.MagicMock()) + @mock.patch.object(azure_helper.util, 'del_dir') + def test_clean_up(self, del_dir): + manager = azure_helper.OpenSSLManager() + manager.clean_up() + self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list) + class TestWALinuxAgentShim(TestCase): @@ -318,11 +326,10 @@ class TestWALinuxAgentShim(TestCase): def test_http_client_uses_certificate(self): shim = azure_helper.WALinuxAgentShim() + shim.register_with_azure_and_fetch_data() self.assertEqual( [mock.call(self.OpenSSLManager.return_value.certificate)], self.AzureEndpointHttpClient.call_args_list) - self.assertEqual(self.AzureEndpointHttpClient.return_value, - shim.http_client) def test_correct_url_used_for_goalstate(self): self.find_endpoint.return_value = 'test_endpoint' @@ -333,7 +340,8 @@ class 
TestWALinuxAgentShim(TestCase): [mock.call('http://test_endpoint/machine/?comp=goalstate')], get.call_args_list) self.assertEqual( - [mock.call(get.return_value.contents, shim.http_client)], + [mock.call(get.return_value.contents, + self.AzureEndpointHttpClient.return_value)], self.GoalState.call_args_list) def test_certificates_used_to_determine_public_keys(self): @@ -368,7 +376,7 @@ class TestWALinuxAgentShim(TestCase): expected_url = 'http://test_endpoint/machine?comp=health' self.assertEqual( [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)], - shim.http_client.post.call_args_list) + self.AzureEndpointHttpClient.return_value.post.call_args_list) def test_goal_state_values_used_for_report_ready(self): self.GoalState.return_value.incarnation = 'TestIncarnation' @@ -376,7 +384,45 @@ class TestWALinuxAgentShim(TestCase): self.GoalState.return_value.instance_id = 'TestInstanceId' shim = azure_helper.WALinuxAgentShim() shim.register_with_azure_and_fetch_data() - posted_document = shim.http_client.post.call_args[1]['data'] + posted_document = ( + self.AzureEndpointHttpClient.return_value.post.call_args[1]['data'] + ) self.assertIn('TestIncarnation', posted_document) self.assertIn('TestContainerId', posted_document) self.assertIn('TestInstanceId', posted_document) + + def test_clean_up_can_be_called_at_any_time(self): + shim = azure_helper.WALinuxAgentShim() + shim.clean_up() + + def test_clean_up_will_clean_up_openssl_manager_if_instantiated(self): + shim = azure_helper.WALinuxAgentShim() + shim.register_with_azure_and_fetch_data() + shim.clean_up() + self.assertEqual( + 1, self.OpenSSLManager.return_value.clean_up.call_count) + + +class TestGetMetadataFromFabric(TestCase): + + @mock.patch.object(azure_helper, 'WALinuxAgentShim') + def test_data_from_shim_returned(self, shim): + ret = azure_helper.get_metadata_from_fabric() + self.assertEqual( + shim.return_value.register_with_azure_and_fetch_data.return_value, + ret) + + @mock.patch.object(azure_helper, 'WALinuxAgentShim') + def test_success_calls_clean_up(self, shim): + azure_helper.get_metadata_from_fabric() + self.assertEqual(1, shim.return_value.clean_up.call_count) + + @mock.patch.object(azure_helper, 'WALinuxAgentShim') + def test_failure_in_registration_calls_clean_up(self, shim): + class SentinelException(Exception): + pass + shim.return_value.register_with_azure_and_fetch_data.side_effect = ( + SentinelException) + self.assertRaises(SentinelException, + azure_helper.get_metadata_from_fabric) + self.assertEqual(1, shim.return_value.clean_up.call_count) -- cgit v1.2.3 From 1185aeae80fc8279946069bb8eec492b3cb81556 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 8 May 2015 16:22:36 +0100 Subject: Reintroduce original code path. 
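The choice between the two paths hangs off a new 'agent_command' datasource
setting: the sentinel value '__builtin__' selects the native Python
implementation, while anything else is treated as the command used to invoke
the agent. A rough sketch of how an operator override flows through the
config merge (values illustrative):

    user_ds_cfg = {'agent_command': '__builtin__'}  # e.g. from system config
    ds_cfg = util.mergemanydict([user_ds_cfg, BUILTIN_DS_CONFIG])
    assert ds_cfg['agent_command'] == '__builtin__'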
--- cloudinit/sources/DataSourceAzure.py | 74 +++++++++++++++++++++------ tests/unittests/test_datasource/test_azure.py | 5 ++ 2 files changed, 63 insertions(+), 16 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 4053cfa6..3c7820a6 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -22,6 +22,7 @@ import crypt import fnmatch import os import os.path +import time from xml.dom import minidom from cloudinit import log as logging @@ -35,11 +36,13 @@ LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} +AGENT_START = ['service', 'walinuxagent', 'start'] BOUNCE_COMMAND = ['sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"] DATA_DIR_CLEAN_LIST = ['SharedConfig.xml'] BUILTIN_DS_CONFIG = { + 'agent_command': '__builtin__', 'data_dir': "/var/lib/waagent", 'set_hostname': True, 'hostname_bounce': { @@ -110,6 +113,56 @@ class DataSourceAzureNet(sources.DataSource): root = sources.DataSource.__str__(self) return "%s [seed=%s]" % (root, self.seed) + def get_metadata_from_agent(self): + temp_hostname = self.metadata.get('local-hostname') + hostname_command = self.ds_cfg['hostname_bounce']['hostname_command'] + with temporary_hostname(temp_hostname, self.ds_cfg, + hostname_command=hostname_command) \ + as previous_hostname: + if (previous_hostname is not None + and util.is_true(self.ds_cfg.get('set_hostname'))): + cfg = self.ds_cfg['hostname_bounce'] + try: + perform_hostname_bounce(hostname=temp_hostname, + cfg=cfg, + prev_hostname=previous_hostname) + except Exception as e: + LOG.warn("Failed publishing hostname: %s", e) + util.logexc(LOG, "handling set_hostname failed") + + try: + invoke_agent(self.ds_cfg['agent_command']) + except util.ProcessExecutionError: + # claim the datasource even if the command failed + util.logexc(LOG, "agent command '%s' failed.", + self.ds_cfg['agent_command']) + + ddir = self.ds_cfg['data_dir'] + shcfgxml = os.path.join(ddir, "SharedConfig.xml") + wait_for = [shcfgxml] + + fp_files = [] + for pk in self.cfg.get('_pubkeys', []): + bname = str(pk['fingerprint'] + ".crt") + fp_files += [os.path.join(ddir, bname)] + + missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", + func=wait_for_files, + args=(wait_for + fp_files,)) + if len(missing): + LOG.warn("Did not find files, but going on: %s", missing) + + metadata = {} + if shcfgxml in missing: + LOG.warn("SharedConfig.xml missing, using static instance-id") + else: + try: + metadata['instance-id'] = iid_from_shared_config(shcfgxml) + except ValueError as e: + LOG.warn("failed to get instance id in %s: %s", shcfgxml, e) + metadata['public-keys'] = pubkeys_from_crt_files(fp_files) + return metadata + def get_data(self): # azure removes/ejects the cdrom containing the ovf-env.xml # file on reboot. So, in order to successfully reboot we @@ -162,8 +215,6 @@ class DataSourceAzureNet(sources.DataSource): # now update ds_cfg to reflect contents pass in config user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) - mycfg = self.ds_cfg - ddir = mycfg['data_dir'] if found != ddir: cached_ovfenv = util.load_file( @@ -184,8 +235,12 @@ class DataSourceAzureNet(sources.DataSource): # the directory to be protected. 
write_files(ddir, files, dirmode=0o700)
+        if self.ds_cfg['agent_command'] == '__builtin__':
+            metadata_func = get_metadata_from_fabric
+        else:
+            metadata_func = self.get_metadata_from_agent
        try:
-            fabric_data = get_metadata_from_fabric()
+            fabric_data = metadata_func()
        except Exception as exc:
            LOG.info("Error communicating with Azure fabric; assume we aren't"
                     " on Azure.", exc_info=True)
            return False
@@ -567,19 +622,6 @@ def iid_from_shared_config(path):
    return iid_from_shared_config_content(content)

-def iid_from_shared_config_content(content):
-    """
-    find INSTANCE_ID in:
-    <?xml version="1.0" encoding="utf-8"?>
-    <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
-      <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0">
-        <Service name="..." guid="{00000000-0000-0000-0000-000000000000}" />
-    """
-    dom = minidom.parseString(content)
-    depnode = single_node_at_path(dom, ["SharedConfig", "Deployment"])
-    return depnode.attributes.get('name').value
-
-
class BrokenAzureDataSource(Exception):
    pass

diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 983be4cd..c72dc801 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -389,11 +389,13 @@ class TestAzureDataSource(TestCase):

    def test_exception_fetching_fabric_data_doesnt_propagate(self):
        ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+        ds.ds_cfg['agent_command'] = '__builtin__'
        self.get_metadata_from_fabric.side_effect = Exception
        self.assertFalse(ds.get_data())

    def test_fabric_data_included_in_metadata(self):
        ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+        ds.ds_cfg['agent_command'] = '__builtin__'
        self.get_metadata_from_fabric.return_value = {'test': 'value'}
        ret = ds.get_data()
        self.assertTrue(ret)
@@ -419,6 +421,9 @@ class TestAzureBounce(TestCase):
        self.patches.enter_context(
            mock.patch.object(DataSourceAzure, 'find_ephemeral_part',
                              mock.MagicMock(return_value=None)))
+        self.patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric',
+                              mock.MagicMock(return_value={})))

    def setUp(self):
        super(TestAzureBounce, self).setUp()
-- cgit v1.2.3

From d8a1910ae79478b8976c4950219d37e15640e7e7 Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Fri, 8 May 2015 16:52:12 +0100
Subject: Default to old code path.

---
 cloudinit/sources/DataSourceAzure.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 3c7820a6..f2388c63 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -42,7 +42,7 @@ BOUNCE_COMMAND = ['sh', '-xc',
 DATA_DIR_CLEAN_LIST = ['SharedConfig.xml']

 BUILTIN_DS_CONFIG = {
-    'agent_command': '__builtin__',
+    'agent_command': AGENT_START,
     'data_dir': "/var/lib/waagent",
     'set_hostname': True,
     'hostname_bounce': {
-- cgit v1.2.3

From 512eb552e0ca740e1d285dc1b66a56579bcf68ec Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Fri, 8 May 2015 16:52:49 +0100
Subject: Fix retrying.
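The previous loop slept after a failed GET but could run off the end of its
range without raising, leaving 'response' unbound once the attempts were
exhausted. The corrected shape of the retry, as a simplified sketch (fetch()
stands in for the goal-state GET):

    attempts = 0
    while True:
        try:
            response = fetch()
        except Exception:
            if attempts < 10:
                time.sleep(attempts + 1)  # back off a little longer each time
            else:
                raise  # give up loudly instead of continuing without a response
        else:
            break
        attempts += 1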
--- cloudinit/sources/helpers/azure.py | 9 +++++++-- tests/unittests/test_datasource/test_azure_helper.py | 11 +++++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index dfdfa7c2..2ce728f5 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -242,14 +242,19 @@ class WALinuxAgentShim(object): self.openssl_manager = OpenSSLManager() http_client = AzureEndpointHttpClient(self.openssl_manager.certificate) LOG.info('Registering with Azure...') - for i in range(10): + attempts = 0 + while True: try: response = http_client.get( 'http://{}/machine/?comp=goalstate'.format(self.endpoint)) except Exception: - time.sleep(i + 1) + if attempts < 10: + time.sleep(attempts + 1) + else: + raise else: break + attempts += 1 LOG.debug('Successfully fetched GoalState XML.') goal_state = GoalState(response.contents, http_client) public_keys = [] diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 5fac2ade..23bc997c 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -323,6 +323,8 @@ class TestWALinuxAgentShim(TestCase): mock.patch.object(azure_helper, 'iid_from_shared_config_content')) self.OpenSSLManager = patches.enter_context( mock.patch.object(azure_helper, 'OpenSSLManager')) + patches.enter_context( + mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock())) def test_http_client_uses_certificate(self): shim = azure_helper.WALinuxAgentShim() @@ -402,6 +404,15 @@ class TestWALinuxAgentShim(TestCase): self.assertEqual( 1, self.OpenSSLManager.return_value.clean_up.call_count) + def test_failure_to_fetch_goalstate_bubbles_up(self): + class SentinelException(Exception): + pass + self.AzureEndpointHttpClient.return_value.get.side_effect = ( + SentinelException) + shim = azure_helper.WALinuxAgentShim() + self.assertRaises(SentinelException, + shim.register_with_azure_and_fetch_data) + class TestGetMetadataFromFabric(TestCase): -- cgit v1.2.3 From dad01d2cf14a7e0bdca455040fb5a173775cefdc Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 8 May 2015 16:52:58 +0100 Subject: Python 2.6 fixes. 
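Auto-numbered replacement fields in str.format were only added in Python
2.7, so bare '{}' placeholders raise ValueError on 2.6; explicitly indexed
fields behave the same on every supported version. For example:

    '{0}:8080'.format('10.0.0.1')  # fine on Python 2.6, 2.7 and 3.x
    '{}:8080'.format('10.0.0.1')   # ValueError on Python 2.6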
---
 cloudinit/sources/helpers/azure.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 2ce728f5..281d733e 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -246,7 +246,7 @@ class WALinuxAgentShim(object):
        while True:
            try:
                response = http_client.get(
-                    'http://{}/machine/?comp=goalstate'.format(self.endpoint))
+                    'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
            except Exception:
                if attempts < 10:
                    time.sleep(attempts + 1)
@@ -278,7 +278,7 @@ class WALinuxAgentShim(object):
            instance_id=goal_state.instance_id,
        )
        http_client.post(
-            "http://{}/machine?comp=health".format(self.endpoint),
+            "http://{0}/machine?comp=health".format(self.endpoint),
            data=document,
            extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
        )
-- cgit v1.2.3

From 8af1802c9971ec1f2ebac23e9b42d5b42f43afae Mon Sep 17 00:00:00 2001
From: Ben Howard
Date: Fri, 22 May 2015 10:28:17 -0600
Subject: AZURE: Redact on-disk user password in /var/lib/ovf-env.xml

The fabric provides the user password in plain text via the CDROM, and
cloud-init previously wrote the ovf-env.xml in /var/lib/waagent with the
password in plain text. This change redacts the password.

---
 cloudinit/sources/DataSourceAzure.py | 28 ++++++++-- tests/unittests/test_datasource/test_azure.py | 73 ++++++++++++++++++++++++---
 2 files changed, 91 insertions(+), 10 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index f2388c63..d0a882ca 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -23,6 +23,8 @@ import fnmatch
 import os
 import os.path
 import time
+import xml.etree.ElementTree as ET
+
 from xml.dom import minidom

 from cloudinit import log as logging
@@ -68,6 +70,10 @@ BUILTIN_CLOUD_CONFIG = {
 DS_CFG_PATH = ['datasource', DS_NAME]
 DEF_EPHEMERAL_LABEL = 'Temporary Storage'

+# The redacted password fails to meet password complexity requirements
+# so we can safely use this to mask/redact the password in the ovf-env.xml
+DEF_PASSWD_REDACTION = 'REDACTED'
+

 def get_hostname(hostname_command='hostname'):
     return util.subp(hostname_command, capture=True)[0].strip()
@@ -414,14 +420,30 @@ def wait_for_files(flist, maxwait=60, naplen=.5):


 def write_files(datadir, files, dirmode=None):
+
+    def _redact_password(cnt, fname):
+        """Azure provides the UserPassword in plain text.
So we redact it""" + try: + root = ET.fromstring(cnt) + for elem in root.iter(): + if ('UserPassword' in elem.tag and + elem.text != DEF_PASSWD_REDACTION): + elem.text = DEF_PASSWD_REDACTION + return ET.tostring(root) + except Exception as e: + LOG.critical("failed to redact userpassword in {}".format(fname)) + return cnt + if not datadir: return if not files: files = {} util.ensure_dir(datadir, dirmode) for (name, content) in files.items(): - util.write_file(filename=os.path.join(datadir, name), - content=content, mode=0o600) + fname = os.path.join(datadir, name) + if 'ovf-env.xml' in name: + content = _redact_password(content, fname) + util.write_file(filename=fname, content=content, mode=0o600) def invoke_agent(cmd): @@ -576,7 +598,7 @@ def read_azure_ovf(contents): defuser = {} if username: defuser['name'] = username - if password: + if password and DEF_PASSWD_REDACTION != password: defuser['passwd'] = encrypt_pass(password) defuser['lock_passwd'] = False diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 4c4b8eec..33b971f6 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -18,6 +18,7 @@ import stat import yaml import shutil import tempfile +import xml.etree.ElementTree as ET def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): @@ -144,6 +145,39 @@ class TestAzureDataSource(TestCase): return dsrc + def xml_equals(self, oxml, nxml): + """Compare two sets of XML to make sure they are equal""" + + def create_tag_index(xml): + et = ET.fromstring(xml) + ret = {} + for x in et.iter(): + ret[x.tag] = x + return ret + + def tags_exists(x, y): + for tag in x.keys(): + self.assertIn(tag, y) + for tag in y.keys(): + self.assertIn(tag, x) + + def tags_equal(x, y): + for x_tag, x_val in x.items(): + y_val = y.get(x_val.tag) + self.assertEquals(x_val.text, y_val.text) + + old_cnt = create_tag_index(oxml) + new_cnt = create_tag_index(nxml) + tags_exists(old_cnt, new_cnt) + tags_equal(old_cnt, new_cnt) + + def xml_notequals(self, oxml, nxml): + try: + self.xml_equals(oxml, nxml) + except AssertionError as e: + return + raise AssertionError("XML is the same") + def test_basic_seed_dir(self): odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), @@ -322,6 +356,31 @@ class TestAzureDataSource(TestCase): self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw) + def test_password_redacted_in_ovf(self): + odata = {'HostName': "myhost", 'UserName': "myuser", + 'UserPassword': "mypass"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata)} + dsrc = self._get_ds(data) + ret = dsrc.get_data() + + self.assertTrue(ret) + ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') + + # The XML should not be same since the user password is redacted + on_disk_ovf = load_file(ovf_env_path) + self.xml_notequals(data['ovfcontent'], on_disk_ovf) + + # Make sure that the redacted password on disk is not used by CI + self.assertNotEquals(dsrc.cfg.get('password'), + DataSourceAzure.DEF_PASSWD_REDACTION) + + # Make sure that the password was really encrypted + et = ET.fromstring(on_disk_ovf) + for elem in et.iter(): + if 'UserPassword' in elem.tag: + self.assertEquals(DataSourceAzure.DEF_PASSWD_REDACTION, + elem.text) + def test_ovf_env_arrives_in_waagent_dir(self): xml = construct_valid_ovf_env(data={}, userdata="FOODATA") dsrc = self._get_ds({'ovfcontent': xml}) @@ -331,7 +390,7 @@ class 
TestAzureDataSource(TestCase): # we expect that the ovf-env.xml file is copied there. ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml') self.assertTrue(os.path.exists(ovf_env_path)) - self.assertEqual(xml, load_file(ovf_env_path)) + self.xml_equals(xml, load_file(ovf_env_path)) def test_ovf_can_include_unicode(self): xml = construct_valid_ovf_env(data={}) @@ -380,12 +439,12 @@ class TestAzureDataSource(TestCase): self.assertEqual(dsrc.userdata_raw, b"NEW_USERDATA") self.assertTrue(os.path.exists( os.path.join(self.waagent_d, 'otherfile'))) - self.assertFalse( - os.path.exists(os.path.join(self.waagent_d, 'SharedConfig.xml'))) - self.assertTrue( - os.path.exists(os.path.join(self.waagent_d, 'ovf-env.xml'))) - self.assertEqual(new_ovfenv, - load_file(os.path.join(self.waagent_d, 'ovf-env.xml'))) + self.assertFalse(os.path.exists( + os.path.join(self.waagent_d, 'SharedConfig.xml'))) + self.assertTrue(os.path.exists( + os.path.join(self.waagent_d, 'ovf-env.xml'))) + new_xml = load_file(os.path.join(self.waagent_d, 'ovf-env.xml')) + self.xml_equals(new_ovfenv, new_xml) def test_exception_fetching_fabric_data_doesnt_propagate(self): ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) -- cgit v1.2.3 From ba7fc871f2e73e0adbf883ef8253180f41cdcfe8 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 16 Jun 2015 17:35:03 +0100 Subject: Use wget to fetch CloudStack passwords. Different versions of the CloudStack password server respond differently; wget handles these nicely for us, so it's easier to just use wget. LP: #1440263, #1464253 --- cloudinit/sources/DataSourceCloudStack.py | 35 +++++++--------------- tests/unittests/test_datasource/test_cloudstack.py | 30 +++++++++---------- 2 files changed, 25 insertions(+), 40 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index 7b32e1fa..d0cac5bb 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -29,8 +29,6 @@ import time from socket import inet_ntoa from struct import pack -from six.moves import http_client - from cloudinit import ec2_utils as ec2 from cloudinit import log as logging from cloudinit import url_helper as uhelp @@ -47,35 +45,22 @@ class CloudStackPasswordServerClient(object): has documentation about the system. This implementation is following that found at https://github.com/shankerbalan/cloudstack-scripts/blob/master/cloud-set-guest-password-debian - - The CloudStack password server is, essentially, a broken HTTP - server. It requires us to provide a valid HTTP request (including a - DomU_Request header, which is the meat of the request), but just - writes the text of its response on to the socket, without a status - line or any HTTP headers. This makes HTTP libraries sad, which - explains the screwiness of the implementation of this class. - - This should be fixed in CloudStack by commit - a72f14ea9cb832faaac946b3cf9f56856b50142a in December 2014. """ def __init__(self, virtual_router_address): self.virtual_router_address = virtual_router_address def _do_request(self, domu_request): - # We have to provide a valid HTTP request, but a valid HTTP - # response is not returned. This means that getresponse() chokes, - # so we use the socket directly to read off the response. - # Because we're reading off the socket directly, we can't re-use the - # connection. 
- conn = http_client.HTTPConnection(self.virtual_router_address, 8080) - try: - conn.request('GET', '', headers={'DomU_Request': domu_request}) - conn.sock.settimeout(30) - output = conn.sock.recv(1024).decode('utf-8').strip() - finally: - conn.close() - return output + # The password server was in the past, a broken HTTP server, but is now + # fixed. wget handles this seamlessly, so it's easier to shell out to + # that rather than write our own handling code. + output, _ = util.subp([ + 'wget', '--quiet', '--tries', '3', '--timeout', '20', + '--output-document', '-', '--header', + 'DomU_Request: {0}'.format(domu_request), + '{0}:8080'.format(self.virtual_router_address) + ]) + return output.strip() def get_password(self): password = self._do_request('send_my_password') diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py index 959d78ae..656d80d1 100644 --- a/tests/unittests/test_datasource/test_cloudstack.py +++ b/tests/unittests/test_datasource/test_cloudstack.py @@ -23,13 +23,11 @@ class TestCloudStackPasswordFetching(TestCase): self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name))) def _set_password_server_response(self, response_string): - http_client = mock.MagicMock() - http_client.HTTPConnection.return_value.sock.recv.return_value = \ - response_string.encode('utf-8') + subp = mock.MagicMock(return_value=(response_string, '')) self.patches.enter_context( - mock.patch('cloudinit.sources.DataSourceCloudStack.http_client', - http_client)) - return http_client + mock.patch('cloudinit.sources.DataSourceCloudStack.util.subp', + subp)) + return subp def test_empty_password_doesnt_create_config(self): self._set_password_server_response('') @@ -55,26 +53,28 @@ class TestCloudStackPasswordFetching(TestCase): ds = DataSourceCloudStack({}, None, helpers.Paths({})) self.assertTrue(ds.get_data()) - def assertRequestTypesSent(self, http_client, expected_request_types): - request_types = [ - kwargs['headers']['DomU_Request'] - for _, kwargs - in http_client.HTTPConnection.return_value.request.call_args_list] + def assertRequestTypesSent(self, subp, expected_request_types): + request_types = [] + for call in subp.call_args_list: + args = call[0][0] + for arg in args: + if arg.startswith('DomU_Request'): + request_types.append(arg.split()[1]) self.assertEqual(expected_request_types, request_types) def test_valid_response_means_password_marked_as_saved(self): password = 'SekritSquirrel' - http_client = self._set_password_server_response(password) + subp = self._set_password_server_response(password) ds = DataSourceCloudStack({}, None, helpers.Paths({})) ds.get_data() - self.assertRequestTypesSent(http_client, + self.assertRequestTypesSent(subp, ['send_my_password', 'saved_password']) def _check_password_not_saved_for(self, response_string): - http_client = self._set_password_server_response(response_string) + subp = self._set_password_server_response(response_string) ds = DataSourceCloudStack({}, None, helpers.Paths({})) ds.get_data() - self.assertRequestTypesSent(http_client, ['send_my_password']) + self.assertRequestTypesSent(subp, ['send_my_password']) def test_password_not_saved_if_empty(self): self._check_password_not_saved_for('') -- cgit v1.2.3 From afb5421ee717174b989bfed61333f2073b3f3f50 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Mon, 6 Jul 2015 15:33:33 +0100 Subject: Return a sensible value for DataSourceGCE.availability_zone. 
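The GCE metadata service reports the zone as a full resource path (of the
form 'projects/<project-number>/zones/<zone>'), so only the final path
component is a usable availability zone. The transformation below boils
down to:

    'projects/1234567890/zones/us-central1-b'.split('/')[-1]
    # -> 'us-central1-b'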
--- cloudinit/sources/DataSourceGCE.py | 4 ++++ tests/unittests/test_datasource/test_gce.py | 5 +++++ 2 files changed, 9 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index f4ed915d..1b28a68c 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -116,6 +116,10 @@ class DataSourceGCE(sources.DataSource): lines = self.metadata['public-keys'].splitlines() self.metadata['public-keys'] = [self._trim_key(k) for k in lines] + if self.metadata['availability-zone']: + self.metadata['availability-zone'] = self.metadata[ + 'availability-zone'].split('/')[-1] + encoding = self.metadata.get('user-data-encoding') if encoding: if encoding == 'base64': diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index 98b68f09..fa714070 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -159,3 +159,8 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): self.ds.get_data() self.assertEqual([key_content], self.ds.get_public_ssh_keys()) + + def test_only_last_part_of_zone_used_for_availability_zone(self): + _set_mock_metadata() + self.ds.get_data() + self.assertEqual('bar', self.ds.availability_zone) -- cgit v1.2.3 From 9461b1235f7278440ffb84f1e3d95b3f906e444b Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 21 Jul 2015 13:06:11 +0100 Subject: Use /dev/disk devices for Azure ephemeral disk. The ephemeral disk will not necessarily be assigned the same name at each boot (LP: #1411582), so we use some udev rules to ensure we always get the right one. --- cloudinit/sources/DataSourceAzure.py | 39 ++++++++++++++------------- tests/unittests/test_datasource/test_azure.py | 6 +++-- 2 files changed, 25 insertions(+), 20 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d0a882ca..1193d88b 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -254,7 +254,7 @@ class DataSourceAzureNet(sources.DataSource): self.metadata.update(fabric_data) - found_ephemeral = find_ephemeral_disk() + found_ephemeral = find_fabric_formatted_ephemeral_disk() if found_ephemeral: self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral LOG.debug("using detected ephemeral0 of %s", found_ephemeral) @@ -276,30 +276,33 @@ def count_files(mp): return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*')) -def find_ephemeral_part(): +def find_fabric_formatted_ephemeral_part(): """ - Locate the default ephmeral0.1 device. This will be the first device - that has a LABEL of DEF_EPHEMERAL_LABEL and is a NTFS device. If Azure - gets more ephemeral devices, this logic will only identify the first - such device. + Locate the first fabric formatted ephemeral device. 
""" - c_label_devs = util.find_devs_with("LABEL=%s" % DEF_EPHEMERAL_LABEL) - c_fstype_devs = util.find_devs_with("TYPE=ntfs") - for dev in c_label_devs: - if dev in c_fstype_devs: - return dev + potential_locations = ['/dev/disk/cloud/azure_resource-part1', + '/dev/disk/azure/resource-part1'] + device_location = None + for potential_location in potential_locations: + if os.path.exists(potential_location): + device_location = potential_location + break + if device_location is None: + return None + ntfs_devices = util.find_devs_with("TYPE=ntfs") + real_device = os.path.realpath(device_location) + if real_device in ntfs_devices: + return device_location return None -def find_ephemeral_disk(): +def find_fabric_formatted_ephemeral_disk(): """ Get the ephemeral disk. """ - part_dev = find_ephemeral_part() - if part_dev and str(part_dev[-1]).isdigit(): - return part_dev[:-1] - elif part_dev: - return part_dev + part_dev = find_fabric_formatted_ephemeral_part() + if part_dev: + return part_dev.split('-')[0] return None @@ -313,7 +316,7 @@ def support_new_ephemeral(cfg): new ephemeral device is detected, cloud-init overrides the default frequency for both disk-setup and mounts for the current boot only. """ - device = find_ephemeral_part() + device = find_fabric_formatted_ephemeral_part() if not device: LOG.debug("no default fabric formated ephemeral0.1 found") return None diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 33b971f6..3b7e3293 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -475,10 +475,12 @@ class TestAzureBounce(TestCase): mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs', mock.MagicMock(return_value=[]))) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'find_ephemeral_disk', + mock.patch.object(DataSourceAzure, + 'find_fabric_formatted_ephemeral_disk', mock.MagicMock(return_value=None))) self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'find_ephemeral_part', + mock.patch.object(DataSourceAzure, + 'find_fabric_formatted_ephemeral_part', mock.MagicMock(return_value=None))) self.patches.enter_context( mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric', -- cgit v1.2.3 From b5230bc3e9d65692093cae9d2f4ca628435a382b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 21 Jul 2015 12:36:53 -0400 Subject: fix 'make pyflakes' --- cloudinit/sources/DataSourceAzure.py | 2 +- tests/unittests/test_datasource/test_azure.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index d0a882ca..2ce85637 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -430,7 +430,7 @@ def write_files(datadir, files, dirmode=None): elem.text != DEF_PASSWD_REDACTION): elem.text = DEF_PASSWD_REDACTION return ET.tostring(root) - except Exception as e: + except Exception: LOG.critical("failed to redact userpassword in {}".format(fname)) return cnt diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 33b971f6..d632bcb9 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -174,7 +174,7 @@ class TestAzureDataSource(TestCase): def xml_notequals(self, oxml, nxml): try: self.xml_equals(oxml, nxml) - except AssertionError as e: + except AssertionError: 
return raise AssertionError("XML is the same") -- cgit v1.2.3 From 73c5bbfa31b922a0ba403216c0fc1f63b22a9262 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 22 Jul 2015 13:06:34 +0100 Subject: Make full data source available to code that handles mirror selection. --- cloudinit/distros/__init__.py | 15 +++++++-------- cloudinit/sources/__init__.py | 3 +-- tests/unittests/test_distros/test_generic.py | 22 +++++++++++++++------- 3 files changed, 23 insertions(+), 17 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 8a947867..47b76c68 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -117,12 +117,11 @@ class Distro(object): arch = self.get_primary_arch() return _get_arch_package_mirror_info(mirror_info, arch) - def get_package_mirror_info(self, arch=None, - availability_zone=None): + def get_package_mirror_info(self, arch=None, data_source=None): # This resolves the package_mirrors config option # down to a single dict of {mirror_name: mirror_url} arch_info = self._get_arch_package_mirror_info(arch) - return _get_package_mirror_info(availability_zone=availability_zone, + return _get_package_mirror_info(data_source=data_source, mirror_info=arch_info) def apply_network(self, settings, bring_up=True): @@ -556,7 +555,7 @@ class Distro(object): LOG.info("Added user '%s' to group '%s'" % (member, name)) -def _get_package_mirror_info(mirror_info, availability_zone=None, +def _get_package_mirror_info(mirror_info, data_source=None, mirror_filter=util.search_for_mirror): # given a arch specific 'mirror_info' entry (from package_mirrors) # search through the 'search' entries, and fallback appropriately @@ -572,11 +571,11 @@ def _get_package_mirror_info(mirror_info, availability_zone=None, ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re) subst = {} - if availability_zone: - subst['availability_zone'] = availability_zone + if data_source and data_source.availability_zone: + subst['availability_zone'] = data_source.availability_zone - if availability_zone and re.match(ec2_az_re, availability_zone): - subst['ec2_region'] = "%s" % availability_zone[0:-1] + if re.match(ec2_az_re, data_source.availability_zone): + subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1] results = {} for (name, mirror) in mirror_info.get('failsafe', {}).items(): diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 39eab51b..1a036638 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -210,8 +210,7 @@ class DataSource(object): return hostname def get_package_mirror_info(self): - return self.distro.get_package_mirror_info( - availability_zone=self.availability_zone) + return self.distro.get_package_mirror_info(data_source=self) def normalize_pubkey_data(pubkey_data): diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py index 8e3bd78a..6ed1704c 100644 --- a/tests/unittests/test_distros/test_generic.py +++ b/tests/unittests/test_distros/test_generic.py @@ -7,6 +7,11 @@ import os import shutil import tempfile +try: + from unittest import mock +except ImportError: + import mock + unknown_arch_info = { 'arches': ['default'], 'failsafe': {'primary': 'http://fs-primary-default', @@ -144,33 +149,35 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): def test_get_package_mirror_info_az_ec2(self): arch_mirrors = gapmi(package_mirrors, arch="amd64") + data_source_mock = 
mock.Mock(availability_zone="us-east-1a") - results = gpmi(arch_mirrors, availability_zone="us-east-1a", + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_first) self.assertEqual(results, {'primary': 'http://us-east-1.ec2/', 'security': 'http://security-mirror1-intel'}) - results = gpmi(arch_mirrors, availability_zone="us-east-1a", + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_second) self.assertEqual(results, {'primary': 'http://us-east-1a.clouds/', 'security': 'http://security-mirror2-intel'}) - results = gpmi(arch_mirrors, availability_zone="us-east-1a", + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_none) self.assertEqual(results, package_mirrors[0]['failsafe']) def test_get_package_mirror_info_az_non_ec2(self): arch_mirrors = gapmi(package_mirrors, arch="amd64") + data_source_mock = mock.Mock(availability_zone="nova.cloudvendor") - results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor", + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_first) self.assertEqual(results, {'primary': 'http://nova.cloudvendor.clouds/', 'security': 'http://security-mirror1-intel'}) - results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor", + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_last) self.assertEqual(results, {'primary': 'http://nova.cloudvendor.clouds/', @@ -178,17 +185,18 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): def test_get_package_mirror_info_none(self): arch_mirrors = gapmi(package_mirrors, arch="amd64") + data_source_mock = mock.Mock(availability_zone=None) # because both search entries here replacement based on # availability-zone, the filter will be called with an empty list and # failsafe should be taken. - results = gpmi(arch_mirrors, availability_zone=None, + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_first) self.assertEqual(results, {'primary': 'http://fs-primary-intel', 'security': 'http://security-mirror1-intel'}) - results = gpmi(arch_mirrors, availability_zone=None, + results = gpmi(arch_mirrors, data_source=data_source_mock, mirror_filter=self.return_last) self.assertEqual(results, {'primary': 'http://fs-primary-intel', -- cgit v1.2.3 From bc7d57a0ae827978c87919c833bb5e8d2d5143c6 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Wed, 22 Jul 2015 13:06:34 +0100 Subject: Add DataSource.region and use it in mirror selection. Also implement DataSource.region for EC2 and GCE data sources. 
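The derivation itself is plain string manipulation on the availability
zone. Roughly (a sketch only, not the exact datasource code; the zone
strings are illustrative):

    # EC2 availability zones are a region name plus a zone letter,
    # so the region is the zone minus its final character.
    def ec2_region(availability_zone):
        return availability_zone[:-1] if availability_zone else None

    # GCE zones keep the region in everything before the last '-'.
    def gce_region(availability_zone):
        return availability_zone.rsplit('-', 1)[0]

    assert ec2_region('us-east-1a') == 'us-east-1'
    assert gce_region('us-central1-a') == 'us-central1'

This makes a %(region)s substitution available to package mirror
templates alongside the existing %(availability_zone)s and
%(ec2_region)s keys.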
--- cloudinit/distros/__init__.py | 3 +++ cloudinit/sources/DataSourceEc2.py | 7 +++++++ cloudinit/sources/DataSourceGCE.py | 4 ++++ cloudinit/sources/__init__.py | 4 ++++ config/cloud.cfg | 1 + 5 files changed, 19 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 47b76c68..71884b32 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -577,6 +577,9 @@ def _get_package_mirror_info(mirror_info, data_source=None, if re.match(ec2_az_re, data_source.availability_zone): subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1] + if data_source and data_source.region: + subst['region'] = data_source.region + results = {} for (name, mirror) in mirror_info.get('failsafe', {}).items(): results[name] = mirror diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 798869b7..0032d06c 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -197,6 +197,13 @@ class DataSourceEc2(sources.DataSource): except KeyError: return None + @property + def region(self): + az = self.availability_zone + if az is not None: + return az[:-1] + return None + # Used to match classes to dependencies datasources = [ (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 1b28a68c..7e7fc033 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -152,6 +152,10 @@ class DataSourceGCE(sources.DataSource): def availability_zone(self): return self.metadata['availability-zone'] + @property + def region(self): + return self.availability_zone.rsplit('-', 1)[0] + # Used to match classes to dependencies datasources = [ (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 1a036638..a21c08c2 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -157,6 +157,10 @@ class DataSource(object): return self.metadata.get('availability-zone', self.metadata.get('availability_zone')) + @property + def region(self): + return self.metadata.get('region') + def get_instance_id(self): if not self.metadata or 'instance-id' not in self.metadata: # Return a magic not really instance id string diff --git a/config/cloud.cfg b/config/cloud.cfg index e96e1781..2b27f379 100644 --- a/config/cloud.cfg +++ b/config/cloud.cfg @@ -104,6 +104,7 @@ system_info: primary: - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/ - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/ + - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/ security: [] - arches: [armhf, armel, default] failsafe: -- cgit v1.2.3 From b5574a9925b29417a1b351e7b38c54bc7d144dba Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 30 Jul 2015 18:06:01 -0400 Subject: tests pass --- bin/cloud-init | 28 ++++++++++-- cloudinit/reporting.py | 91 +++++++++++++++++++++++++++++++++++---- cloudinit/sources/__init__.py | 16 ++++--- cloudinit/stages.py | 10 ++++- tests/unittests/test_reporting.py | 14 +++--- 5 files changed, 134 insertions(+), 25 deletions(-) (limited to 'cloudinit/sources') diff --git a/bin/cloud-init b/bin/cloud-init index 1d3e7ee3..7f21e49f 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -46,6 +46,7 @@ from cloudinit import sources from cloudinit import stages from cloudinit import templater from cloudinit import util +from cloudinit import 
reporting from cloudinit import version from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, @@ -313,7 +314,7 @@ def main_modules(action_name, args): # 5. Run the modules for the given stage name # 6. Done! w_msg = welcome_format("%s:%s" % (action_name, name)) - init = stages.Init(ds_deps=[]) + init = stages.Init(ds_deps=[], reporter=args.reporter) # Stage 1 init.read_cfg(extract_fns(args)) # Stage 2 @@ -549,6 +550,8 @@ def main(): ' found (use at your own risk)'), dest='force', default=False) + + parser.set_defaults(reporter=None) subparsers = parser.add_subparsers() # Each action and its sub-options (if any) @@ -595,6 +598,9 @@ def main(): help=("frequency of the module"), required=False, choices=list(FREQ_SHORT_NAMES.keys())) + parser_single.add_argument("--report", action="store_true", + help="enable reporting", + required=False) parser_single.add_argument("module_args", nargs="*", metavar='argument', help=('any additional arguments to' @@ -617,8 +623,24 @@ def main(): if name in ("modules", "init"): functor = status_wrapper - return util.log_time(logfunc=LOG.debug, msg="cloud-init mode '%s'" % name, - get_uptime=True, func=functor, args=(name, args)) + reporting = True + if name == "init": + if args.local: + rname, rdesc = ("init-local", "searching for local datasources") + else: + rname, rdesc = ("init-network", "searching for network datasources") + elif name == "modules": + rname, rdesc = ("modules-%s" % args.mode, "running modules for %s") + elif name == "single": + rname, rdesc = ("single/%s" % args.name, + "running single module %s" % args.name) + reporting = args.report + + reporter = reporting.ReportStack(rname, rdesc, reporting=reporting) + with reporter: + return util.log_time( + logfunc=LOG.debug, msg="cloud-init mode '%s'" % name, + get_uptime=True, func=functor, args=(name, args)) if __name__ == '__main__': diff --git a/cloudinit/reporting.py b/cloudinit/reporting.py index d2dd4fec..c925f661 100644 --- a/cloudinit/reporting.py +++ b/cloudinit/reporting.py @@ -20,9 +20,18 @@ START_EVENT_TYPE = 'start' DEFAULT_CONFIG = { 'logging': {'type': 'log'}, + 'print': {'type': 'print'}, } +class _nameset(set): + def __getattr__(self, name): + if name in self: + return name + raise AttributeError + +status = _nameset(("SUCCESS", "WARN", "FAIL")) + instantiated_handler_registry = DictRegistry() available_handlers = DictRegistry() @@ -43,17 +52,18 @@ class ReportingEvent(object): class FinishReportingEvent(ReportingEvent): - def __init__(self, name, description, successful=None): + def __init__(self, name, description, result=None): super(FinishReportingEvent, self).__init__( FINISH_EVENT_TYPE, name, description) - self.successful = successful + if result is None: + result = status.SUCCESS + self.result = result + if result not in status: + raise ValueError("Invalid result: %s" % result) def as_string(self): - if self.successful is None: - return super(FinishReportingEvent, self).as_string() - success_string = 'success' if self.successful else 'fail' return '{0}: {1}: {2}: {3}'.format( - self.event_type, self.name, success_string, self.description) + self.event_type, self.name, self.result, self.description) class ReportingHandler(object): @@ -73,6 +83,11 @@ class LogHandler(ReportingHandler): logger.info(event.as_string()) +class PrintHandler(ReportingHandler): + def publish_event(self, event): + print(event.as_string()) + + def add_configuration(config): for handler_name, handler_config in config.items(): handler_config = handler_config.copy() @@ -95,12 +110,12 @@ def 
report_event(event): handler.publish_event(event) -def report_finish_event(event_name, event_description, successful=None): +def report_finish_event(event_name, event_description, result): """Report a "finish" event. See :py:func:`.report_event` for parameter details. """ - event = FinishReportingEvent(event_name, event_description, successful) + event = FinishReportingEvent(event_name, event_description, result) return report_event(event) @@ -118,5 +133,65 @@ def report_start_event(event_name, event_description): return report_event(event) +class ReportStack(object): + def __init__(self, name, description, parent=None, reporting=None, + exc_result=None): + self.parent = parent + self.reporting = reporting + self.name = name + self.description = description + + if exc_result is None: + exc_result = status.FAIL + self.exc_result = exc_result + + if reporting is None: + # if reporting is specified respect it, otherwise use parent's value + if parent: + reporting = parent.reporting + else: + reporting = True + if parent: + self.fullname = '/'.join((name, parent.fullname,)) + else: + self.fullname = self.name + self.children = {} + + def __enter__(self): + self.exception = None + if self.reporting: + report_start_event(self.fullname, self.description) + if self.parent: + self.parent.children[self.name] = (None, None) + return self + + def childrens_finish_info(self, result=None, description=None): + for result in (status.FAIL, status.WARN): + for name, (value, msg) in self.children.items(): + if value == result: + return (result, "[" + name + "]" + msg) + if result is None: + result = status.SUCCESS + if description is None: + description = self.description + return (result, description) + + def finish_info(self, exc): + # return tuple of description, and value + if exc: + # by default, exceptions are fatal + return (self.exc_result, self.description) + return self.childrens_finish_info() + + def __exit__(self, exc_type, exc_value, traceback): + self.exception = exc_value + (result, msg) = self.finish_info(exc_value) + if self.parent: + self.parent.children[self.name] = (result, msg) + if self.reporting: + report_finish_event(self.fullname, msg, result) + + available_handlers.register_item('log', LogHandler) +available_handlers.register_item('print', PrintHandler) add_configuration(DEFAULT_CONFIG) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index a21c08c2..c4848d5d 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -27,6 +27,7 @@ import six from cloudinit import importer from cloudinit import log as logging +from cloudinit import reporting from cloudinit import type_utils from cloudinit import user_data as ud from cloudinit import util @@ -246,17 +247,22 @@ def normalize_pubkey_data(pubkey_data): return keys -def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list): +def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): ds_list = list_sources(cfg_list, ds_deps, pkg_list) ds_names = [type_utils.obj_name(f) for f in ds_list] LOG.debug("Searching for data source in: %s", ds_names) for cls in ds_list: + myreporter = reporting.ReportStack( + "check-%s" % cls, "searching for %s" % cls, + parent=reporter, exc_result=reporting.status.WARN) + try: - LOG.debug("Seeing if we can get any data from %s", cls) - s = cls(sys_cfg, distro, paths) - if s.get_data(): - return (s, type_utils.obj_name(cls)) + with myreporter: + LOG.debug("Seeing if we can get any data from %s", cls) + s = cls(sys_cfg, distro, paths) 
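+                # get_data() returns True only when this source yielded
+                # usable data; an exception raised inside the stack is
+                # recorded as WARN (exc_result) rather than aborting the
+                # search across the remaining datasources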
+ if s.get_data(): + return (s, type_utils.obj_name(cls)) except Exception: util.logexc(LOG, "Getting data from %s failed", cls) diff --git a/cloudinit/stages.py b/cloudinit/stages.py index d28e765b..dbcdbece 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -46,6 +46,7 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import type_utils from cloudinit import util +from cloudinit import reporting LOG = logging.getLogger(__name__) @@ -53,7 +54,7 @@ NULL_DATA_SOURCE = None class Init(object): - def __init__(self, ds_deps=None): + def __init__(self, reporter=None, ds_deps=None): if ds_deps is not None: self.ds_deps = ds_deps else: @@ -65,6 +66,11 @@ class Init(object): # Changed only when a fetch occurs self.datasource = NULL_DATA_SOURCE + if reporter is None: + reporter = reporting.ReportStack( + name="init-reporter", description="init-desc", reporting=False) + self.reporter = reporter + def _reset(self, reset_ds=False): # Recreated on access self._cfg = None @@ -246,7 +252,7 @@ class Init(object): self.paths, copy.deepcopy(self.ds_deps), cfg_list, - pkg_list) + pkg_list, self.reporter) LOG.info("Loaded datasource %s - %s", dsname, ds) self.datasource = ds # Ensure we adjust our path members datasource diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index f4011a79..5700118f 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -32,10 +32,10 @@ class TestReportStartEvent(TestCase): class TestReportFinishEvent(TestCase): - def _report_finish_event(self, successful=None): + def _report_finish_event(self, result=None): event_name, event_description = 'my_test_event', 'my description' reporting.report_finish_event( - event_name, event_description, successful=successful) + event_name, event_description, result=result) return event_name, event_description def assertHandlersPassedObjectWithAsString( @@ -51,7 +51,7 @@ class TestReportFinishEvent(TestCase): self, instantiated_handler_registry): event_name, event_description = self._report_finish_event() expected_string_representation = ': '.join( - ['finish', event_name, event_description]) + ['finish', event_name, reporting.status.SUCCESS, event_description]) self.assertHandlersPassedObjectWithAsString( instantiated_handler_registry.registered_items, expected_string_representation) @@ -61,9 +61,9 @@ class TestReportFinishEvent(TestCase): def test_reporting_successful_finish_has_sensible_string_repr( self, instantiated_handler_registry): event_name, event_description = self._report_finish_event( - successful=True) + result=reporting.status.SUCCESS) expected_string_representation = ': '.join( - ['finish', event_name, 'success', event_description]) + ['finish', event_name, reporting.status.SUCCESS, event_description]) self.assertHandlersPassedObjectWithAsString( instantiated_handler_registry.registered_items, expected_string_representation) @@ -73,9 +73,9 @@ class TestReportFinishEvent(TestCase): def test_reporting_unsuccessful_finish_has_sensible_string_repr( self, instantiated_handler_registry): event_name, event_description = self._report_finish_event( - successful=False) + result=reporting.status.FAIL) expected_string_representation = ': '.join( - ['finish', event_name, 'fail', event_description]) + ['finish', event_name, reporting.status.FAIL, event_description]) self.assertHandlersPassedObjectWithAsString( instantiated_handler_registry.registered_items, expected_string_representation) -- cgit v1.2.3 From 
b22302d8e2b539f61faede7efb3a163966bf170a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 31 Jul 2015 14:38:09 +0000 Subject: fix issues found when testing --- bin/cloud-init | 4 ++-- cloudinit/reporting.py | 16 ++++++++++------ cloudinit/sources/__init__.py | 5 +++-- 3 files changed, 15 insertions(+), 10 deletions(-) (limited to 'cloudinit/sources') diff --git a/bin/cloud-init b/bin/cloud-init index 6a47e5e8..c808eda5 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -636,8 +636,8 @@ def main(): "running single module %s" % args.name) report_on = args.report - reporter = reporting.ReportStack(rname, rdesc, reporting=report_on) - with reporter: + args.reporter = reporting.ReportStack(rname, rdesc, reporting=report_on) + with args.reporter: return util.log_time( logfunc=LOG.debug, msg="cloud-init mode '%s'" % name, get_uptime=True, func=functor, args=(name, args)) diff --git a/cloudinit/reporting.py b/cloudinit/reporting.py index c925f661..1bd7df0d 100644 --- a/cloudinit/reporting.py +++ b/cloudinit/reporting.py @@ -137,7 +137,6 @@ class ReportStack(object): def __init__(self, name, description, parent=None, reporting=None, exc_result=None): self.parent = parent - self.reporting = reporting self.name = name self.description = description @@ -145,18 +144,23 @@ class ReportStack(object): exc_result = status.FAIL self.exc_result = exc_result + # use parents reporting value if not provided if reporting is None: - # if reporting is specified respect it, otherwise use parent's value if parent: reporting = parent.reporting else: reporting = True + self.reporting = reporting + if parent: - self.fullname = '/'.join((name, parent.fullname,)) + self.fullname = '/'.join((parent.fullname, name,)) else: self.fullname = self.name self.children = {} + def __repr__(self): + return ("%s reporting=%s" % (self.fullname, self.reporting)) + def __enter__(self): self.exception = None if self.reporting: @@ -166,10 +170,10 @@ class ReportStack(object): return self def childrens_finish_info(self, result=None, description=None): - for result in (status.FAIL, status.WARN): + for cand_result in (status.FAIL, status.WARN): for name, (value, msg) in self.children.items(): - if value == result: - return (result, "[" + name + "]" + msg) + if value == cand_result: + return (value, "[" + name + "]" + msg) if result is None: result = status.SUCCESS if description is None: diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index c4848d5d..f585c3e4 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -252,9 +252,10 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): ds_names = [type_utils.obj_name(f) for f in ds_list] LOG.debug("Searching for data source in: %s", ds_names) - for cls in ds_list: + for i, cls in enumerate(ds_list): + name=ds_names[i].replace("DataSource", "") myreporter = reporting.ReportStack( - "check-%s" % cls, "searching for %s" % cls, + "check-%s" % name, "searching for %s" % name, parent=reporter, exc_result=reporting.status.WARN) try: -- cgit v1.2.3 From cc923ca255f4ce8c23819e263066e34133f3dd31 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 31 Jul 2015 15:23:04 +0000 Subject: add nicer formating and messages for datasource searching --- cloudinit/sources/__init__.py | 33 +++++++++++++++++++++++++++------ 1 file changed, 27 insertions(+), 6 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index f585c3e4..c174a58f 100644 --- 
a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -247,22 +247,43 @@ def normalize_pubkey_data(pubkey_data): return keys +class SearchReportStack(reporting.ReportStack): + def __init__(self, source, ds_deps, parent): + self.source = source.replace("DataSource", "") + name = "check-%s" % self.source + self.found = False + self.mode = "network" if DEP_NETWORK in ds_deps else "local" + description = "searching for %s data from %s" % ( + self.mode, self.source) + super(SearchReportStack, self).__init__( + name=name, description=description, parent=parent, + exc_result=reporting.status.WARN) + + def finish_info(self, exc): + # return tuple of description, and value + if exc: + # by default, exceptions are fatal + return (self.exc_result, self.description) + if self.found: + description = "found %s data from %s" % (self.mode, self.source) + else: + description = "no %s data found from %s" % (self.mode, self.source) + return self.childrens_finish_info(description=description) + + def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): ds_list = list_sources(cfg_list, ds_deps, pkg_list) ds_names = [type_utils.obj_name(f) for f in ds_list] LOG.debug("Searching for data source in: %s", ds_names) for i, cls in enumerate(ds_list): - name=ds_names[i].replace("DataSource", "") - myreporter = reporting.ReportStack( - "check-%s" % name, "searching for %s" % name, - parent=reporter, exc_result=reporting.status.WARN) - + srcname=ds_names[i] try: - with myreporter: + with SearchReportStack(srcname, ds_deps, reporter) as rep: LOG.debug("Seeing if we can get any data from %s", cls) s = cls(sys_cfg, distro, paths) if s.get_data(): + rep.found = True return (s, type_utils.obj_name(cls)) except Exception: util.logexc(LOG, "Getting data from %s failed", cls) -- cgit v1.2.3 From f36706442b4c1913ea8f7953993b9e03f3adf623 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 31 Jul 2015 16:12:37 +0000 Subject: address Daniel's comments in review --- bin/cloud-init | 3 ++- cloudinit/reporting.py | 34 +++++++++++++++------------------- cloudinit/sources/__init__.py | 7 +++---- cloudinit/stages.py | 3 ++- 4 files changed, 22 insertions(+), 25 deletions(-) (limited to 'cloudinit/sources') diff --git a/bin/cloud-init b/bin/cloud-init index c808eda5..d0ac4c7f 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -636,7 +636,8 @@ def main(): "running single module %s" % args.name) report_on = args.report - args.reporter = reporting.ReportStack(rname, rdesc, reporting=report_on) + args.reporter = reporting.ReportStack( + rname, rdesc, reporting_enabled=report_on) with args.reporter: return util.log_time( logfunc=LOG.debug, msg="cloud-init mode '%s'" % name, diff --git a/cloudinit/reporting.py b/cloudinit/reporting.py index 1bd7df0d..154f4e03 100644 --- a/cloudinit/reporting.py +++ b/cloudinit/reporting.py @@ -11,6 +11,7 @@ report events in a structured manner. 
import abc import logging +import sys from cloudinit.registry import DictRegistry @@ -83,9 +84,9 @@ class LogHandler(ReportingHandler): logger.info(event.as_string()) -class PrintHandler(ReportingHandler): +class StderrHandler(ReportingHandler): def publish_event(self, event): - print(event.as_string()) + sys.stderr.write(event.as_string() + "\n") def add_configuration(config): @@ -134,23 +135,20 @@ def report_start_event(event_name, event_description): class ReportStack(object): - def __init__(self, name, description, parent=None, reporting=None, - exc_result=None): + def __init__(self, name, description, parent=None, + reporting_enabled=None, result_on_exception=status.FAIL): self.parent = parent self.name = name self.description = description - - if exc_result is None: - exc_result = status.FAIL - self.exc_result = exc_result + self.result_on_exception = result_on_exception # use parents reporting value if not provided - if reporting is None: + if reporting_enabled is None: if parent: - reporting = parent.reporting + reporting_enabled = parent.reporting_enabled else: - reporting = True - self.reporting = reporting + reporting_enabled = True + self.reporting_enabled = reporting_enabled if parent: self.fullname = '/'.join((parent.fullname, name,)) @@ -159,11 +157,10 @@ class ReportStack(object): self.children = {} def __repr__(self): - return ("%s reporting=%s" % (self.fullname, self.reporting)) + return ("%s reporting=%s" % (self.fullname, self.reporting_enabled)) def __enter__(self): - self.exception = None - if self.reporting: + if self.reporting_enabled: report_start_event(self.fullname, self.description) if self.parent: self.parent.children[self.name] = (None, None) @@ -184,18 +181,17 @@ class ReportStack(object): # return tuple of description, and value if exc: # by default, exceptions are fatal - return (self.exc_result, self.description) + return (self.result_on_exception, self.description) return self.childrens_finish_info() def __exit__(self, exc_type, exc_value, traceback): - self.exception = exc_value (result, msg) = self.finish_info(exc_value) if self.parent: self.parent.children[self.name] = (result, msg) - if self.reporting: + if self.reporting_enabled: report_finish_event(self.fullname, msg, result) available_handlers.register_item('log', LogHandler) -available_handlers.register_item('print', PrintHandler) +available_handlers.register_item('print', StderrHandler) add_configuration(DEFAULT_CONFIG) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index c174a58f..0dc75f9e 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -257,7 +257,7 @@ class SearchReportStack(reporting.ReportStack): self.mode, self.source) super(SearchReportStack, self).__init__( name=name, description=description, parent=parent, - exc_result=reporting.status.WARN) + result_on_exception=reporting.status.WARN) def finish_info(self, exc): # return tuple of description, and value @@ -276,10 +276,9 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): ds_names = [type_utils.obj_name(f) for f in ds_list] LOG.debug("Searching for data source in: %s", ds_names) - for i, cls in enumerate(ds_list): - srcname=ds_names[i] + for name, cls in zip(ds_names, ds_list): try: - with SearchReportStack(srcname, ds_deps, reporter) as rep: + with SearchReportStack(name, ds_deps, reporter) as rep: LOG.debug("Seeing if we can get any data from %s", cls) s = cls(sys_cfg, distro, paths) if s.get_data(): diff --git a/cloudinit/stages.py 
b/cloudinit/stages.py index 2bf7a1c4..82197d02 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -68,7 +68,8 @@ class Init(object): if reporter is None: reporter = reporting.ReportStack( - name="init-reporter", description="init-desc", reporting=False) + name="init-reporter", description="init-desc", + reporting_enabled=False) self.reporter = reporter def _reset(self, reset_ds=False): -- cgit v1.2.3 From 4f4e6d1cf90928daa1ab339f687b3319454aefdd Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 31 Jul 2015 16:31:26 +0000 Subject: move 'mode' out of SearchReportStack --- cloudinit/sources/__init__.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 0dc75f9e..6f2d2276 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -248,13 +248,12 @@ def normalize_pubkey_data(pubkey_data): class SearchReportStack(reporting.ReportStack): - def __init__(self, source, ds_deps, parent): + def __init__(self, source, mode, parent): self.source = source.replace("DataSource", "") name = "check-%s" % self.source self.found = False - self.mode = "network" if DEP_NETWORK in ds_deps else "local" - description = "searching for %s data from %s" % ( - self.mode, self.source) + self.mode = mode + description = "searching for %s data from %s" % (mode, self.source) super(SearchReportStack, self).__init__( name=name, description=description, parent=parent, result_on_exception=reporting.status.WARN) @@ -274,11 +273,12 @@ class SearchReportStack(reporting.ReportStack): def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): ds_list = list_sources(cfg_list, ds_deps, pkg_list) ds_names = [type_utils.obj_name(f) for f in ds_list] - LOG.debug("Searching for data source in: %s", ds_names) + mode = "network" if DEP_NETWORK in ds_deps else "local" + LOG.debug("Searching for %s data source in: %s", mode, ds_names) for name, cls in zip(ds_names, ds_list): try: - with SearchReportStack(name, ds_deps, reporter) as rep: + with SearchReportStack(name, mode, reporter) as rep: LOG.debug("Seeing if we can get any data from %s", cls) s = cls(sys_cfg, distro, paths) if s.get_data(): -- cgit v1.2.3 From 07b452e166b5d2ff34d5558b1dbba42ab0f1f23c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 31 Jul 2015 19:27:52 +0000 Subject: plumb the rest the reporting through --- bin/cloud-init | 9 +++++---- cloudinit/cloud.py | 8 +++++++- cloudinit/reporting.py | 32 +++++++++++++++++++++----------- cloudinit/sources/__init__.py | 32 +++++++------------------------- cloudinit/stages.py | 29 ++++++++++++++++++++++++----- 5 files changed, 64 insertions(+), 46 deletions(-) (limited to 'cloudinit/sources') diff --git a/bin/cloud-init b/bin/cloud-init index de3b9fbf..d369a806 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -284,7 +284,7 @@ def main_init(name, args): return (init.datasource, ["Consuming user data failed!"]) # Stage 8 - re-read and apply relevant cloud-config to include user-data - mods = stages.Modules(init, extract_fns(args)) + mods = stages.Modules(init, extract_fns(args), reporter=args.reporter) # Stage 9 try: outfmt_orig = outfmt @@ -329,7 +329,7 @@ def main_modules(action_name, args): if not args.force: return [(msg)] # Stage 3 - mods = stages.Modules(init, extract_fns(args)) + mods = stages.Modules(init, extract_fns(args), reporter=args.reporter) # Stage 4 try: LOG.debug("Closing stdin") @@ -384,7 +384,7 @@ def main_single(name, 
args): if not args.force: return 1 # Stage 3 - mods = stages.Modules(init, extract_fns(args)) + mods = stages.Modules(init, extract_fns(args), reporter=args.reporter) mod_args = args.module_args if mod_args: LOG.debug("Using passed in arguments %s", mod_args) @@ -630,7 +630,8 @@ def main(): else: rname, rdesc = ("init-network", "searching for network datasources") elif name == "modules": - rname, rdesc = ("modules-%s" % args.mode, "running modules for %s") + rname, rdesc = ("modules-%s" % args.mode, + "running modules for %s" % args.mode) elif name == "single": rname, rdesc = ("single/%s" % args.name, "running single module %s" % args.name) diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index 95e0cfb2..71eb80eb 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -40,12 +40,18 @@ LOG = logging.getLogger(__name__) class Cloud(object): - def __init__(self, datasource, paths, cfg, distro, runners): + def __init__(self, datasource, paths, cfg, distro, runners, reporter=None): self.datasource = datasource self.paths = paths self.distro = distro self._cfg = cfg self._runners = runners + if reporter is None: + reporter = reporting.ReportStack( + name="unnamed-cloud-reporter", + description="unnamed-cloud-reporter", + reporting_enabled=False) + self.reporter = reporter # If a 'user' manipulates logging or logging services # it is typically useful to cause the logging to be diff --git a/cloudinit/reporting.py b/cloudinit/reporting.py index 154f4e03..08014c70 100644 --- a/cloudinit/reporting.py +++ b/cloudinit/reporting.py @@ -86,7 +86,8 @@ class LogHandler(ReportingHandler): class StderrHandler(ReportingHandler): def publish_event(self, event): - sys.stderr.write(event.as_string() + "\n") + #sys.stderr.write(event.as_string() + "\n") + print(event.as_string()) def add_configuration(config): @@ -135,12 +136,14 @@ def report_start_event(event_name, event_description): class ReportStack(object): - def __init__(self, name, description, parent=None, + def __init__(self, name, description, message=None, parent=None, reporting_enabled=None, result_on_exception=status.FAIL): self.parent = parent self.name = name self.description = description + self.message = message self.result_on_exception = result_on_exception + self.result = None # use parents reporting value if not provided if reporting_enabled is None: @@ -160,28 +163,35 @@ class ReportStack(object): return ("%s reporting=%s" % (self.fullname, self.reporting_enabled)) def __enter__(self): + self.result = None if self.reporting_enabled: report_start_event(self.fullname, self.description) if self.parent: self.parent.children[self.name] = (None, None) return self - def childrens_finish_info(self, result=None, description=None): + def childrens_finish_info(self): for cand_result in (status.FAIL, status.WARN): for name, (value, msg) in self.children.items(): if value == cand_result: return (value, "[" + name + "]" + msg) - if result is None: - result = status.SUCCESS - if description is None: - description = self.description - return (result, description) - + return (self.result, self.message) + + @property + def message(self): + if self._message is not None: + return self._message + return self.description + + @message.setter + def message(self, value): + self._message = value + + def finish_info(self, exc): # return tuple of description, and value if exc: - # by default, exceptions are fatal - return (self.result_on_exception, self.description) + return (self.result_on_exception, self.message) return self.childrens_finish_info() def 
__exit__(self, exc_type, exc_value, traceback):
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 6f2d2276..3b48f173 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -247,29 +247,6 @@ def normalize_pubkey_data(pubkey_data):
     return keys
 
 
-class SearchReportStack(reporting.ReportStack):
-    def __init__(self, source, mode, parent):
-        self.source = source.replace("DataSource", "")
-        name = "check-%s" % self.source
-        self.found = False
-        self.mode = mode
-        description = "searching for %s data from %s" % (mode, self.source)
-        super(SearchReportStack, self).__init__(
-            name=name, description=description, parent=parent,
-            result_on_exception=reporting.status.WARN)
-
-    def finish_info(self, exc):
-        # return tuple of description, and value
-        if exc:
-            # by default, exceptions are fatal
-            return (self.exc_result, self.description)
-        if self.found:
-            description = "found %s data from %s" % (self.mode, self.source)
-        else:
-            description = "no %s data found from %s" % (self.mode, self.source)
-        return self.childrens_finish_info(description=description)
-
-
 def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
     ds_list = list_sources(cfg_list, ds_deps, pkg_list)
     ds_names = [type_utils.obj_name(f) for f in ds_list]
@@ -277,12 +254,17 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
     LOG.debug("Searching for %s data source in: %s", mode, ds_names)
 
     for name, cls in zip(ds_names, ds_list):
+        myrep = reporting.ReportStack(
+            name="search-%s-%s" % (mode, name.replace("DataSource", "")),
+            description="searching for %s data from %s" % (mode, name),
+            message = "no %s data found from %s" % (mode, name),
+            parent=reporter)
         try:
-            with SearchReportStack(name, mode, reporter) as rep:
+            with myrep:
                 LOG.debug("Seeing if we can get any data from %s", cls)
                 s = cls(sys_cfg, distro, paths)
                 if s.get_data():
-                    rep.found = True
+                    myrep.message = "found %s data from %s" % (mode, name)
                     return (s, type_utils.obj_name(cls))
         except Exception:
             util.logexc(LOG, "Getting data from %s failed", cls)
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 79d22538..8c79ae4e 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -341,7 +341,8 @@ class Init(object):
         # Form the needed options to cloudify our members
         return cloud.Cloud(self.datasource, self.paths, self.cfg,
-                           self.distro, helpers.Runners(self.paths))
+                           self.distro, helpers.Runners(self.paths),
+                           reporter=self.reporter)
 
     def update(self):
         if not self._write_to_cache():
@@ -507,8 +508,14 @@ class Init(object):
     def consume_data(self, frequency=PER_INSTANCE):
         # Consume the userdata first, because we want to let the part
         # handlers run first (for merging stuff)
-        self._consume_userdata(frequency)
-        self._consume_vendordata(frequency)
+        with reporting.ReportStack(
+            "consume-user-data", "reading and applying user-data",
+            parent=self.reporter):
+            self._consume_userdata(frequency)
+        with reporting.ReportStack(
+            "consume-vendor-data", "reading and applying vendor-data",
+            parent=self.reporter):
+            self._consume_vendordata(frequency)
 
         # Perform post-consumption adjustments so that
         # modules that run during the init stage reflect
@@ -581,11 +588,12 @@ class Init(object):
 
 
 class Modules(object):
-    def __init__(self, init, cfg_files=None):
+    def __init__(self, init, cfg_files=None, reporter=None):
         self.init = init
         self.cfg_files = cfg_files
         # Created on first use
         self._cached_cfg = None
+        self.reporter = reporter
 
     @property
     def cfg(self):
@@ -695,7
+703,18 @@ class Modules(object): which_ran.append(name) # This name will affect the semaphore name created run_name = "config-%s" % (name) - cc.run(run_name, mod.handle, func_args, freq=freq) + + desc="running %s with frequency %s" % (run_name, freq) + myrep = reporting.ReportStack( + name=run_name, description=desc, parent=self.reporter) + + with myrep: + ran, _r = cc.run(run_name, mod.handle, func_args, freq=freq) + if ran: + myrep.message = "%s ran successfully" % run_name + else: + myrep.message = "%s previously ran" % run_name + except Exception as e: util.logexc(LOG, "Running module %s (%s) failed", name, mod) failures.append((name, e)) -- cgit v1.2.3 From 89c564a6fd5ac89869f83541370557e3fa58495c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sun, 2 Aug 2015 17:51:40 -0400 Subject: fix tests from sync change ReportStack to ReportEventStack change default ReportEventStack to be status.SUCCESS instead of None --- bin/cloud-init | 3 +- cloudinit/cloud.py | 2 +- cloudinit/reporting/__init__.py | 91 +++++++++++++++++++++++++++++++++++---- cloudinit/reporting/handlers.py | 7 +++ cloudinit/sources/__init__.py | 2 +- cloudinit/stages.py | 10 ++--- tests/unittests/test_reporting.py | 19 ++++---- 7 files changed, 109 insertions(+), 25 deletions(-) (limited to 'cloudinit/sources') diff --git a/bin/cloud-init b/bin/cloud-init index d369a806..51253c42 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -637,7 +637,8 @@ def main(): "running single module %s" % args.name) report_on = args.report - args.reporter = reporting.ReportStack( + reporting.add_configuration({'print': {'type': 'print'}}) + args.reporter = reporting.ReportEventStack( rname, rdesc, reporting_enabled=report_on) with args.reporter: return util.log_time( diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index 71eb80eb..a0fb42a3 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -47,7 +47,7 @@ class Cloud(object): self._cfg = cfg self._runners = runners if reporter is None: - reporter = reporting.ReportStack( + reporter = reporting.ReportEventStack( name="unnamed-cloud-reporter", description="unnamed-cloud-reporter", reporting_enabled=False) diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py index b0364eec..78dde715 100644 --- a/cloudinit/reporting/__init__.py +++ b/cloudinit/reporting/__init__.py @@ -22,6 +22,15 @@ DEFAULT_CONFIG = { instantiated_handler_registry = DictRegistry() +class _nameset(set): + def __getattr__(self, name): + if name in self: + return name + raise AttributeError("%s not a valid value" % name) + + +status = _nameset(("SUCCESS", "WARN", "FAIL")) + class ReportingEvent(object): """Encapsulation of event formatting.""" @@ -39,17 +48,16 @@ class ReportingEvent(object): class FinishReportingEvent(ReportingEvent): - def __init__(self, name, description, successful=None): + def __init__(self, name, description, result=status.SUCCESS): super(FinishReportingEvent, self).__init__( FINISH_EVENT_TYPE, name, description) - self.successful = successful + self.result = result + if result not in status: + raise ValueError("Invalid result: %s" % result) def as_string(self): - if self.successful is None: - return super(FinishReportingEvent, self).as_string() - success_string = 'success' if self.successful else 'fail' return '{0}: {1}: {2}: {3}'.format( - self.event_type, self.name, success_string, self.description) + self.event_type, self.name, self.result, self.description) def add_configuration(config): @@ -74,12 +82,13 @@ def report_event(event): handler.publish_event(event) 
-def report_finish_event(event_name, event_description, successful=None): +def report_finish_event(event_name, event_description, + result=status.SUCCESS): """Report a "finish" event. See :py:func:`.report_event` for parameter details. """ - event = FinishReportingEvent(event_name, event_description, successful) + event = FinishReportingEvent(event_name, event_description, result) return report_event(event) @@ -97,4 +106,70 @@ def report_start_event(event_name, event_description): return report_event(event) +class ReportEventStack(object): + def __init__(self, name, description, message=None, parent=None, + reporting_enabled=None, result_on_exception=status.FAIL): + self.parent = parent + self.name = name + self.description = description + self.message = message + self.result_on_exception = result_on_exception + self.result = status.SUCCESS + + # use parents reporting value if not provided + if reporting_enabled is None: + if parent: + reporting_enabled = parent.reporting_enabled + else: + reporting_enabled = True + self.reporting_enabled = reporting_enabled + + if parent: + self.fullname = '/'.join((parent.fullname, name,)) + else: + self.fullname = self.name + self.children = {} + + def __repr__(self): + return ("%s reporting=%s" % (self.fullname, self.reporting_enabled)) + + def __enter__(self): + self.result = status.SUCCESS + if self.reporting_enabled: + report_start_event(self.fullname, self.description) + if self.parent: + self.parent.children[self.name] = (None, None) + return self + + def childrens_finish_info(self): + for cand_result in (status.FAIL, status.WARN): + for name, (value, msg) in self.children.items(): + if value == cand_result: + return (value, "[" + name + "]" + msg) + return (self.result, self.message) + + @property + def message(self): + if self._message is not None: + return self._message + return self.description + + @message.setter + def message(self, value): + self._message = value + + def finish_info(self, exc): + # return tuple of description, and value + if exc: + return (self.result_on_exception, self.message) + return self.childrens_finish_info() + + def __exit__(self, exc_type, exc_value, traceback): + (result, msg) = self.finish_info(exc_value) + if self.parent: + self.parent.children[self.name] = (result, msg) + if self.reporting_enabled: + report_finish_event(self.fullname, msg, result) + + add_configuration(DEFAULT_CONFIG) diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py index be323f53..1d5ca524 100644 --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -21,5 +21,12 @@ class LogHandler(ReportingHandler): logger.info(event.as_string()) +class StderrHandler(ReportingHandler): + def publish_event(self, event): + #sys.stderr.write(event.as_string() + "\n") + print(event.as_string()) + + available_handlers = DictRegistry() available_handlers.register_item('log', LogHandler) +available_handlers.register_item('print', StderrHandler) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 3b48f173..d07cf1fa 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -254,7 +254,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): LOG.debug("Searching for %s data source in: %s", mode, ds_names) for name, cls in zip(ds_names, ds_list): - myrep = reporting.ReportStack( + myrep = reporting.ReportEventStack( name="search-%s-%s" % (mode, name.replace("DataSource", "")), description="searching for %s data from %s" % (mode, name), 
message = "no %s data found from %s" % (mode, name), diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 8c79ae4e..42989bb4 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -67,7 +67,7 @@ class Init(object): self.datasource = NULL_DATA_SOURCE if reporter is None: - reporter = reporting.ReportStack( + reporter = reporting.ReportEventStack( name="init-reporter", description="init-desc", reporting_enabled=False) self.reporter = reporter @@ -242,7 +242,7 @@ class Init(object): if self.datasource is not NULL_DATA_SOURCE: return self.datasource - with reporting.ReportStack( + with reporting.ReportEventStack( name="check-cache", description="attempting to read from cache", parent=self.reporter) as myrep: ds = self._restore_from_cache() @@ -508,11 +508,11 @@ class Init(object): def consume_data(self, frequency=PER_INSTANCE): # Consume the userdata first, because we need want to let the part # handlers run first (for merging stuff) - with reporting.ReportStack( + with reporting.ReportEventStack( "consume-user-data", "reading and applying user-data", parent=self.reporter): self._consume_userdata(frequency) - with reporting.ReportStack( + with reporting.ReportEventStack( "consume-vendor-data", "reading and applying vendor-data", parent=self.reporter): self._consume_userdata(frequency) @@ -705,7 +705,7 @@ class Modules(object): run_name = "config-%s" % (name) desc="running %s with frequency %s" % (run_name, freq) - myrep = reporting.ReportStack( + myrep = reporting.ReportEventStack( name=run_name, description=desc, parent=self.reporter) with myrep: diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index 5700118f..4f4cf3a4 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -32,7 +32,7 @@ class TestReportStartEvent(TestCase): class TestReportFinishEvent(TestCase): - def _report_finish_event(self, result=None): + def _report_finish_event(self, result=reporting.status.SUCCESS): event_name, event_description = 'my_test_event', 'my description' reporting.report_finish_event( event_name, event_description, result=result) @@ -95,31 +95,32 @@ class TestReportingHandler(TestCase): def test_no_default_publish_event_implementation(self): self.assertRaises(NotImplementedError, - reporting.ReportingHandler().publish_event, None) + reporting.handlers.ReportingHandler().publish_event, + None) class TestLogHandler(TestCase): - @mock.patch.object(reporting.logging, 'getLogger') + @mock.patch.object(reporting.handlers.logging, 'getLogger') def test_appropriate_logger_used(self, getLogger): event_type, event_name = 'test_type', 'test_name' event = reporting.ReportingEvent(event_type, event_name, 'description') - reporting.LogHandler().publish_event(event) + reporting.handlers.LogHandler().publish_event(event) self.assertEqual( [mock.call( 'cloudinit.reporting.{0}.{1}'.format(event_type, event_name))], getLogger.call_args_list) - @mock.patch.object(reporting.logging, 'getLogger') + @mock.patch.object(reporting.handlers.logging, 'getLogger') def test_single_log_message_at_info_published(self, getLogger): event = reporting.ReportingEvent('type', 'name', 'description') - reporting.LogHandler().publish_event(event) + reporting.handlers.LogHandler().publish_event(event) self.assertEqual(1, getLogger.return_value.info.call_count) - @mock.patch.object(reporting.logging, 'getLogger') + @mock.patch.object(reporting.handlers.logging, 'getLogger') def test_log_message_uses_event_as_string(self, getLogger): event = 
reporting.ReportingEvent('type', 'name', 'description') - reporting.LogHandler().publish_event(event) + reporting.handlers.LogHandler().publish_event(event) self.assertIn(event.as_string(), getLogger.return_value.info.call_args[0][0]) @@ -130,7 +131,7 @@ class TestDefaultRegisteredHandler(TestCase): registered_items = ( reporting.instantiated_handler_registry.registered_items) for _, item in registered_items.items(): - if isinstance(item, reporting.LogHandler): + if isinstance(item, reporting.handlers.LogHandler): break else: self.fail('No reporting LogHandler registered by default.') -- cgit v1.2.3 From e29c07adc1aa9d042ae790d1cb900a6a51a85952 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sun, 2 Aug 2015 18:06:50 -0400 Subject: event name doesnt need mode as it is run through init-local or init-net --- cloudinit/sources/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index d07cf1fa..cf50c1fb 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -255,7 +255,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): for name, cls in zip(ds_names, ds_list): myrep = reporting.ReportEventStack( - name="search-%s-%s" % (mode, name.replace("DataSource", "")), + name="search-%s" % name.replace("DataSource", ""), description="searching for %s data from %s" % (mode, name), message = "no %s data found from %s" % (mode, name), parent=reporter) -- cgit v1.2.3 From 328cc7fbaf4d60b51193fb8c14e52d8c6f3273f2 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 4 Aug 2015 21:57:57 -0500 Subject: pep8 fixes --- cloudinit/config/cc_rh_subscription.py | 6 +++--- cloudinit/config/cc_rsyslog.py | 1 + cloudinit/sources/DataSourceCloudStack.py | 6 ++++-- 3 files changed, 8 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 6da26d25..3b30c47e 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -130,9 +130,9 @@ class SubscriptionManager(object): ((not self.auto_attach) or (util.is_false(str(self.auto_attach)))): - no_auto = "The service-level key must be used in conjunction with "\ - "the auto-attach key. Please re-run with auto-attach: "\ - "True" + no_auto = ("The service-level key must be used in conjunction " + "with the auto-attach key. Please re-run with " + "auto-attach: True") return False, no_auto return True, None diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py index a0132d28..b8642d65 100644 --- a/cloudinit/config/cc_rsyslog.py +++ b/cloudinit/config/cc_rsyslog.py @@ -130,6 +130,7 @@ HOST_PORT_RE = re.compile( '(([[](?P[^\]]*)[\]])|(?P[^:]*))' '([:](?P[0-9]+))?$') + def reload_syslog(command=DEF_RELOAD, systemd=False): service = 'rsyslog' if command == DEF_RELOAD: diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py index d0cac5bb..64595020 100644 --- a/cloudinit/sources/DataSourceCloudStack.py +++ b/cloudinit/sources/DataSourceCloudStack.py @@ -41,10 +41,12 @@ class CloudStackPasswordServerClient(object): """ Implements password fetching from the CloudStack password server. 
- http://cloudstack-administration.readthedocs.org/en/latest/templates.html#adding-password-management-to-your-templates + http://cloudstack-administration.readthedocs.org/ + en/latest/templates.html#adding-password-management-to-your-templates has documentation about the system. This implementation is following that found at - https://github.com/shankerbalan/cloudstack-scripts/blob/master/cloud-set-guest-password-debian + https://github.com/shankerbalan/cloudstack-scripts/ + blob/master/cloud-set-guest-password-debian """ def __init__(self, virtual_router_address): -- cgit v1.2.3 From 5585b397cfb4ba397e9cfba3d86e3d10af20eb71 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 4 Aug 2015 22:01:27 -0500 Subject: fix pep8 --- bin/cloud-init | 3 ++- cloudinit/reporting/handlers.py | 7 ------- cloudinit/sources/__init__.py | 2 +- cloudinit/stages.py | 10 ++++++---- tests/unittests/test_reporting.py | 6 ++++-- 5 files changed, 13 insertions(+), 15 deletions(-) (limited to 'cloudinit/sources') diff --git a/bin/cloud-init b/bin/cloud-init index 51253c42..40cdbb06 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -628,7 +628,8 @@ def main(): if args.local: rname, rdesc = ("init-local", "searching for local datasources") else: - rname, rdesc = ("init-network", "searching for network datasources") + rname, rdesc = ("init-network", + "searching for network datasources") elif name == "modules": rname, rdesc = ("modules-%s" % args.mode, "running modules for %s" % args.mode) diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py index 1d5ca524..be323f53 100644 --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -21,12 +21,5 @@ class LogHandler(ReportingHandler): logger.info(event.as_string()) -class StderrHandler(ReportingHandler): - def publish_event(self, event): - #sys.stderr.write(event.as_string() + "\n") - print(event.as_string()) - - available_handlers = DictRegistry() available_handlers.register_item('log', LogHandler) -available_handlers.register_item('print', StderrHandler) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index cf50c1fb..838cd198 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -257,7 +257,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): myrep = reporting.ReportEventStack( name="search-%s" % name.replace("DataSource", ""), description="searching for %s data from %s" % (mode, name), - message = "no %s data found from %s" % (mode, name), + message="no %s data found from %s" % (mode, name), parent=reporter) try: with myrep: diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 7b489b9f..d300709d 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -243,7 +243,8 @@ class Init(object): return self.datasource with reporting.ReportEventStack( - name="check-cache", description="attempting to read from cache", + name="check-cache", + description="attempting to read from cache", parent=self.reporter) as myrep: ds = self._restore_from_cache() if ds: @@ -708,17 +709,18 @@ class Modules(object): # This name will affect the semaphore name created run_name = "config-%s" % (name) - desc="running %s with frequency %s" % (run_name, freq) + desc = "running %s with frequency %s" % (run_name, freq) myrep = reporting.ReportEventStack( name=run_name, description=desc, parent=self.reporter) with myrep: - ran, _r = cc.run(run_name, mod.handle, func_args, freq=freq) + ran, _r = cc.run(run_name, mod.handle, func_args, + freq=freq) if ran: 
myrep.message = "%s ran successfully" % run_name else: myrep.message = "%s previously ran" % run_name - + except Exception as e: util.logexc(LOG, "Running module %s (%s) failed", name, mod) failures.append((name, e)) diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index 4f4cf3a4..ddfac541 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -51,7 +51,8 @@ class TestReportFinishEvent(TestCase): self, instantiated_handler_registry): event_name, event_description = self._report_finish_event() expected_string_representation = ': '.join( - ['finish', event_name, reporting.status.SUCCESS, event_description]) + ['finish', event_name, reporting.status.SUCCESS, + event_description]) self.assertHandlersPassedObjectWithAsString( instantiated_handler_registry.registered_items, expected_string_representation) @@ -63,7 +64,8 @@ class TestReportFinishEvent(TestCase): event_name, event_description = self._report_finish_event( result=reporting.status.SUCCESS) expected_string_representation = ': '.join( - ['finish', event_name, reporting.status.SUCCESS, event_description]) + ['finish', event_name, reporting.status.SUCCESS, + event_description]) self.assertHandlersPassedObjectWithAsString( instantiated_handler_registry.registered_items, expected_string_representation) -- cgit v1.2.3 From ebd393e56ba21f8a84571dff499e6d6fb6852042 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 6 Aug 2015 18:34:57 -0500 Subject: tests pass --- bin/cloud-init | 10 +++ cloudinit/reporting/handlers.py | 28 +++++++ cloudinit/sources/DataSourceMAAS.py | 88 +++++----------------- cloudinit/url_helper.py | 142 ++++++++++++++++++++++++++++++++++-- cloudinit/util.py | 3 +- 5 files changed, 196 insertions(+), 75 deletions(-) (limited to 'cloudinit/sources') diff --git a/bin/cloud-init b/bin/cloud-init index 40cdbb06..ad2e624a 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -137,6 +137,11 @@ def run_module_section(mods, action_name, section): return failures +def apply_reporting_cfg(cfg): + reporting.reset_configuration() + reporting.update_configuration(cfg.get('reporting'), {}) + + def main_init(name, args): deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK] if args.local: @@ -191,6 +196,7 @@ def main_init(name, args): " longer be active shortly")) logging.resetLogging() logging.setupLogging(init.cfg) + apply_reporting_cfg(init.cfg) # Any log usage prior to setupLogging above did not have local user log # config applied. 
We send the welcome message now, as stderr/out have @@ -283,6 +289,8 @@ def main_init(name, args): util.logexc(LOG, "Consuming user data failed!") return (init.datasource, ["Consuming user data failed!"]) + apply_reporting_cfg(init.cfg) + # Stage 8 - re-read and apply relevant cloud-config to include user-data mods = stages.Modules(init, extract_fns(args), reporter=args.reporter) # Stage 9 @@ -343,6 +351,7 @@ def main_modules(action_name, args): " longer be active shortly")) logging.resetLogging() logging.setupLogging(mods.cfg) + apply_reporting_cfg(init.cfg) # now that logging is setup and stdout redirected, send welcome welcome(name, msg=w_msg) @@ -405,6 +414,7 @@ def main_single(name, args): " longer be active shortly")) logging.resetLogging() logging.setupLogging(mods.cfg) + apply_reporting_cfg(init.cfg) # now that logging is setup and stdout redirected, send welcome welcome(name, msg=w_msg) diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py index 86cbe3c3..d8f69641 100644 --- a/cloudinit/reporting/handlers.py +++ b/cloudinit/reporting/handlers.py @@ -34,5 +34,33 @@ class LogHandler(ReportingHandler): logger.info(event.as_string()) +class WebHookHandler(ReportingHandler): + def __init__(self, endpoint, consumer_key=None, token_key=None, + token_secret=None, consumer_secret=None, timeout=None, + retries=None): + super(WebHookHandler, self).__init__() + + if any(consumer_key, token_key, token_secret, consumer_secret): + self.oauth_helper = url_helper.OauthHelper( + consumer_key=consumer_key, token_key=token_key, + token_secret=token_secret, consumer_secret=consumer_secret) + else: + self.oauth_helper = None + self.endpoint = endpoint + self.timeout = timeout + self.retries = retries + self.ssl_details = util.fetch_ssl_details() + + def publish_event(self, event): + if self.oauth_helper: + readurl = self.oauth_helper.readurl + else: + readurl = url_helper.readurl + return readurl( + self.endpoint, data=event.as_dict(), + timeout=self.timeout, + retries=self.retries, ssl_details=self.ssl_details) + + available_handlers = DictRegistry() available_handlers.register_item('log', LogHandler) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index c1a0eb61..279da238 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -52,7 +52,20 @@ class DataSourceMAAS(sources.DataSource): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.base_url = None self.seed_dir = os.path.join(paths.seed_dir, 'maas') - self.oauth_clockskew = None + self.oauth_helper = self._get_helper() + + def _get_helper(self): + mcfg = self.ds_cfg + # If we are missing token_key, token_secret or consumer_key + # then just do non-authed requests + for required in ('token_key', 'token_secret', 'consumer_key'): + if required not in mcfg: + return url_helper.OauthUrlHelper() + + return url_helper.OauthHelper( + consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'], + token_secret=mcfg['token_secret'], + consumer_secret=mcfg.get('consumer_secret')) def __str__(self): root = sources.DataSource.__str__(self) @@ -84,9 +97,9 @@ class DataSourceMAAS(sources.DataSource): self.base_url = url - (userdata, metadata) = read_maas_seed_url(self.base_url, - self._md_headers, - paths=self.paths) + (userdata, metadata) = read_maas_seed_url( + self.base_url, self.oauth_helper.md_headers, + paths=self.paths) self.userdata_raw = userdata self.metadata = metadata return True @@ -94,31 +107,8 @@ class 
DataSourceMAAS(sources.DataSource): util.logexc(LOG, "Failed fetching metadata from url %s", url) return False - def _md_headers(self, url): - mcfg = self.ds_cfg - - # If we are missing token_key, token_secret or consumer_key - # then just do non-authed requests - for required in ('token_key', 'token_secret', 'consumer_key'): - if required not in mcfg: - return {} - - consumer_secret = mcfg.get('consumer_secret', "") - - timestamp = None - if self.oauth_clockskew: - timestamp = int(time.time()) + self.oauth_clockskew - - return oauth_headers(url=url, - consumer_key=mcfg['consumer_key'], - token_key=mcfg['token_key'], - token_secret=mcfg['token_secret'], - consumer_secret=consumer_secret, - timestamp=timestamp) - def wait_for_metadata_service(self, url): mcfg = self.ds_cfg - max_wait = 120 try: max_wait = int(mcfg.get("max_wait", max_wait)) @@ -138,10 +128,8 @@ class DataSourceMAAS(sources.DataSource): starttime = time.time() check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION) urls = [check_url] - url = url_helper.wait_for_url(urls=urls, max_wait=max_wait, - timeout=timeout, - exception_cb=self._except_cb, - headers_cb=self._md_headers) + url = self.oauth_helper.wait_for_url( + urls=urls, max_wait=max_wait, timeout=timeout) if url: LOG.debug("Using metadata source: '%s'", url) @@ -151,26 +139,6 @@ class DataSourceMAAS(sources.DataSource): return bool(url) - def _except_cb(self, msg, exception): - if not (isinstance(exception, url_helper.UrlError) and - (exception.code == 403 or exception.code == 401)): - return - - if 'date' not in exception.headers: - LOG.warn("Missing header 'date' in %s response", exception.code) - return - - date = exception.headers['date'] - try: - ret_time = time.mktime(parsedate(date)) - except Exception as e: - LOG.warn("Failed to convert datetime '%s': %s", date, e) - return - - self.oauth_clockskew = int(ret_time - time.time()) - LOG.warn("Setting oauth clockskew to %d", self.oauth_clockskew) - return - def read_maas_seed_dir(seed_d): """ @@ -280,24 +248,6 @@ def check_seed_contents(content, seed): return (userdata, md) -def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, - timestamp=None): - if timestamp: - timestamp = str(timestamp) - else: - timestamp = None - - client = oauth1.Client( - consumer_key, - client_secret=consumer_secret, - resource_owner_key=token_key, - resource_owner_secret=token_secret, - signature_method=oauth1.SIGNATURE_PLAINTEXT, - timestamp=timestamp) - uri, signed_headers, body = client.sign(url) - return signed_headers - - class MAASSeedDirNone(Exception): pass diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 0e65f431..2141cdc5 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -25,6 +25,10 @@ import time import six import requests +import oauthlib.oauth1 as oauth1 +import os +import json +from functools import partial from requests import exceptions from six.moves.urllib.parse import ( @@ -147,13 +151,14 @@ class UrlResponse(object): class UrlError(IOError): - def __init__(self, cause, code=None, headers=None): + def __init__(self, cause, code=None, headers=None, url=None): IOError.__init__(self, str(cause)) self.cause = cause self.code = code self.headers = headers if self.headers is None: self.headers = {} + self.url = url def _get_ssl_args(url, ssl_details): @@ -247,9 +252,10 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, and hasattr(e, 'response') # This appeared in v 0.10.8 and hasattr(e.response, 'status_code')): 
excps.append(UrlError(e, code=e.response.status_code, - headers=e.response.headers)) + headers=e.response.headers, + url=url)) else: - excps.append(UrlError(e)) + excps.append(UrlError(e, url=url)) if SSL_ENABLED and isinstance(e, exceptions.SSLError): # ssl exceptions are not going to get fixed by waiting a # few seconds @@ -333,11 +339,11 @@ def wait_for_url(urls, max_wait=None, timeout=None, if not response.contents: reason = "empty response [%s]" % (response.code) url_exc = UrlError(ValueError(reason), code=response.code, - headers=response.headers) + headers=response.headers, url=url) elif not response.ok(): reason = "bad status code [%s]" % (response.code) url_exc = UrlError(ValueError(reason), code=response.code, - headers=response.headers) + headers=response.headers, url=url) else: return url except UrlError as e: @@ -368,3 +374,129 @@ def wait_for_url(urls, max_wait=None, timeout=None, time.sleep(sleep_time) return False + + +class OauthUrlHelper(object): + def __init__(self, consumer_key=None, token_key=None, + token_secret=None, consumer_secret=None, + skew_data_file="/run/oauth_skew.json"): + self.consumer_key = consumer_key + self.consumer_secret = consumer_secret or "" + self.token_key = token_key + self.token_secret = token_secret + self.skew_data_file = skew_data_file + self.skew_data = {} + self._do_oauth = True + self.skew_change_limit = 5 + required = (self.token_key, self.token_secret, self.consumer_key) + if not any(required): + self._do_oauth = False + elif not all(required): + raise ValueError("all or none of token_key, token_secret, or " + "consumer_key can be set") + + self.skew_data = self.read_skew_file() + + def read_skew_file(self): + if self.skew_data_file and os.path.isfile(self.skew_data_file): + with open(self.skew_data_file, mode="r") as fp: + return json.load(fp.read()) + return None + + def update_skew_file(self, host, value): + # this is not atomic + cur = self.read_skew_file() + if cur is None or not self.skew_data_file: + return + cur[host] = value + with open(self.skew_data_file, mode="w") as fp: + fp.write(json.dumps(cur)) + + def exception_cb(self, msg, exception): + if not (isinstance(exception, UrlError) and + (exception.code == 403 or exception.code == 401)): + return + + if 'date' not in exception.headers: + LOG.warn("Missing header 'date' in %s response", exception.code) + return + + date = exception.headers['date'] + try: + ret_time = time.mktime(parsedate(date)) + except Exception as e: + LOG.warn("Failed to convert datetime '%s': %s", date, e) + return + + host = urlparse(exception.url).netloc + skew = int(ret_time - time.time()) + old_skew = self.skew_data.get(host) + if abs(old_skew - skew) > self.skew_change_limit: + self.update_skew_file(host, skew) + LOG.warn("Setting oauth clockskew for %s to %d", + host, skew) + skew_data[host] = skew + + return + + def headers_cb(self, url): + if not self._do_oauth: + return {} + + timestamp = None + host = urlparse(url).netloc + if host in self.skew_data: + timestamp = int(time.time()) + self.skew_data[host] + + return oauth_headers( + url=url, consumer_key=self.consumer_key, + token_key=self.token_key, token_secret=self.token_secret, + consumer_secret=self.consumer_secret, timestamp=timestamp) + + def _wrapped(self, wrapped_func, args, kwargs): + kwargs['headers_cb'] = partial( + self._headers_cb, kwargs.get('headers_cb')) + kwargs['exception_cb'] = partial( + self._exception_cb, kwargs.get('exception_cb')) + return wrapped_func(*args, **kwargs) + + def wait_for_url(self, *args, **kwargs): + 
return self._wrapped(wait_for_url, args, kwargs) + + def readurl(self, *args, **kwargs): + return self._wrapped(readurl, args, kwargs) + + def _exception_cb(self, extra_exception_cb, url, msg, exception): + ret = None + try: + if extra_exception_cb: + ret = extra_exception_cb(msg, exception) + finally: + self.exception_cb(self, msg, exception) + return ret + + def _headers_cb(self, extra_headers_cb, url): + headers = {} + if extra_headers_cb: + headers = extra_headers_cb(url) + if headers: + headers.update(self.headers_cb(url)) + return headers + + +def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret, + timestamp=None): + if timestamp: + timestamp = str(timestamp) + else: + timestamp = None + + client = oauth1.Client( + consumer_key, + client_secret=consumer_secret, + resource_owner_key=token_key, + resource_owner_secret=token_secret, + signature_method=oauth1.SIGNATURE_PLAINTEXT, + timestamp=timestamp) + uri, signed_headers, body = client.sign(url) + return signed_headers diff --git a/cloudinit/util.py b/cloudinit/util.py index 02ba654a..09e583f5 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -782,7 +782,8 @@ def read_file_or_url(url, timeout=5, retries=10, code = e.errno if e.errno == errno.ENOENT: code = url_helper.NOT_FOUND - raise url_helper.UrlError(cause=e, code=code, headers=None) + raise url_helper.UrlError(cause=e, code=code, headers=None, + url=url) return url_helper.FileResponse(file_path, contents=contents) else: return url_helper.readurl(url, -- cgit v1.2.3 From 48cb8699efb5c6116dfa7b4d76d0a5fb6b3fbbbf Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 7 Aug 2015 00:22:49 -0500 Subject: hopefully fix DataSourceMAAS --- cloudinit/sources/DataSourceMAAS.py | 58 +++++++++++----------------- tests/unittests/test_datasource/test_maas.py | 2 +- 2 files changed, 24 insertions(+), 36 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 279da238..2f36bbe2 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -164,12 +164,12 @@ def read_maas_seed_dir(seed_d): return check_seed_contents(md, seed_d) -def read_maas_seed_url(seed_url, header_cb=None, timeout=None, +def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, version=MD_VERSION, paths=None): """ Read the maas datasource at seed_url. 
- - header_cb is a method that should return a headers dictionary for - a given url + read_file_or_url is a method that should provide an interface + like util.read_file_or_url Expected format of seed_url is are the following files: * //meta-data/instance-id @@ -190,14 +190,12 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, 'user-data': "%s/%s" % (base_url, 'user-data'), } + if read_file_or_url is None: + read_file_or_url = util.read_file_or_url + md = {} for name in file_order: url = files.get(name) - if not header_cb: - def _cb(url): - return {} - header_cb = _cb - if name == 'user-data': retries = 0 else: @@ -205,10 +203,8 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None, try: ssl_details = util.fetch_ssl_details(paths) - resp = util.read_file_or_url(url, retries=retries, - headers_cb=header_cb, - timeout=timeout, - ssl_details=ssl_details) + resp = read_file_or_url(url, retries=retries, + timeout=timeout, ssl_details=ssl_details) if resp.ok(): if name in BINARY_FIELDS: md[name] = resp.contents @@ -311,47 +307,39 @@ if __name__ == "__main__": if key in cfg and creds[key] is None: creds[key] = cfg[key] - def geturl(url, headers_cb): - req = Request(url, data=None, headers=headers_cb(url)) - return urlopen(req).read() + oauth_helper = url_helper.OauthUrlHelper(**creds) + + def geturl(url): + return oauth_helper.readurl(url).contents def printurl(url, headers_cb): - print("== %s ==\n%s\n" % (url, geturl(url, headers_cb))) + print("== %s ==\n%s\n" % (url, geturl(url))) - def crawl(url, headers_cb=None): + def crawl(url): if url.endswith("/"): - for line in geturl(url, headers_cb).splitlines(): + for line in geturl(url).splitlines(): if line.endswith("/"): - crawl("%s%s" % (url, line), headers_cb) + crawl("%s%s" % (url, line)) else: - printurl("%s%s" % (url, line), headers_cb) + printurl("%s%s" % (url, line)) else: - printurl(url, headers_cb) - - def my_headers(url): - headers = {} - if creds.get('consumer_key', None) is not None: - headers = oauth_headers(url, **creds) - return headers + printurl(url) if args.subcmd == "check-seed": - if args.url.startswith("http"): - (userdata, metadata) = read_maas_seed_url(args.url, - header_cb=my_headers, - version=args.apiver) - else: - (userdata, metadata) = read_maas_seed_url(args.url) + (userdata, metadata) = read_maas_seed_url( + args.url, read_file_or_url=oauth_helper.read_file_or_url, + version=args.apiver) print("=== userdata ===") print(userdata) print("=== metadata ===") pprint.pprint(metadata) elif args.subcmd == "get": - printurl(args.url, my_headers) + printurl(args.url) elif args.subcmd == "crawl": if not args.url.endswith("/"): args.url = "%s/" % args.url - crawl(args.url, my_headers) + crawl(args.url) main() diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index f109bb04..eb97b692 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -141,7 +141,7 @@ class TestMAASDataSource(TestCase): with mock.patch.object(url_helper, 'readurl', side_effect=side_effect()) as mockobj: userdata, metadata = DataSourceMAAS.read_maas_seed_url( - my_seed, header_cb=my_headers_cb, version=my_ver) + my_seed, version=my_ver) self.assertEqual(b"foodata", userdata) self.assertEqual(metadata['instance-id'], -- cgit v1.2.3 From 60a9ebaba73b2154ce841d36978e317197b66945 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 25 Aug 2015 15:03:35 -0400 Subject: MAAS: fixes to data source and OauthUrlHelper the previous 
version was broken. The vital fixes here are: * adding parsedate and oauth1 imports to url_helper * fix skew_data usage intending to use self.skew_data Additionally: * reorder imports in url_helper * fixes to python3 -m cloudinit.sources.DataSourceMaas LP: #1488507 --- cloudinit/sources/DataSourceMAAS.py | 25 +++++++++++++------------ cloudinit/url_helper.py | 14 +++++++------- 2 files changed, 20 insertions(+), 19 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 2f36bbe2..6c95c218 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -20,14 +20,10 @@ from __future__ import print_function -from email.utils import parsedate import errno -import oauthlib.oauth1 as oauth1 import os import time -from six.moves.urllib_request import Request, urlopen - from cloudinit import log as logging from cloudinit import sources from cloudinit import url_helper @@ -62,7 +58,7 @@ class DataSourceMAAS(sources.DataSource): if required not in mcfg: return url_helper.OauthUrlHelper() - return url_helper.OauthHelper( + return url_helper.OauthUrlHelper( consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'], token_secret=mcfg['token_secret'], consumer_secret=mcfg.get('consumer_secret')) @@ -98,7 +94,7 @@ class DataSourceMAAS(sources.DataSource): self.base_url = url (userdata, metadata) = read_maas_seed_url( - self.base_url, self.oauth_helper.md_headers, + self.base_url, read_file_or_url=self.oauth_helper.readurl, paths=self.paths) self.userdata_raw = userdata self.metadata = metadata @@ -312,25 +308,30 @@ if __name__ == "__main__": def geturl(url): return oauth_helper.readurl(url).contents - def printurl(url, headers_cb): - print("== %s ==\n%s\n" % (url, geturl(url))) + def printurl(url): + print("== %s ==\n%s\n" % (url, geturl(url).decode())) def crawl(url): if url.endswith("/"): - for line in geturl(url).splitlines(): + for line in geturl(url).decode().splitlines(): if line.endswith("/"): crawl("%s%s" % (url, line)) + elif line == "meta-data": + # meta-data is a dir, it *should* end in a / + crawl("%s%s" % (url, "meta-data/")) else: printurl("%s%s" % (url, line)) else: printurl(url) if args.subcmd == "check-seed": + readurl = oauth_helper.readurl + if args.url[0] == "/" or args.url.startswith("file://"): + readurl = None (userdata, metadata) = read_maas_seed_url( - args.url, read_file_or_url=oauth_helper.read_file_or_url, - version=args.apiver) + args.url, version=args.apiver, read_file_or_url=readurl) print("=== userdata ===") - print(userdata) + print(userdata.decode()) print("=== metadata ===") pprint.pprint(metadata) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index dca4cc85..ce6b5444 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -20,16 +20,16 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
-import time - +import json +import os +import requests import six +import time -import requests -import oauthlib.oauth1 as oauth1 -import os -import json +from email.utils import parsedate from functools import partial from requests import exceptions +import oauthlib.oauth1 as oauth1 from six.moves.urllib.parse import ( urlparse, urlunparse, @@ -434,7 +434,7 @@ class OauthUrlHelper(object): if abs(old_skew - skew) > self.skew_change_limit: self.update_skew_file(host, skew) LOG.warn("Setting oauth clockskew for %s to %d", host, skew) - skew_data[host] = skew + self.skew_data[host] = skew return -- cgit v1.2.3 From 50bcb0f77d29a76a03946c6da13b15be25257402 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 31 Aug 2015 13:33:30 -0400 Subject: split 'events' portion of reporting into separate file this just separates events from other things that could conceivably be reported. --- cloudinit/cloud.py | 4 +- cloudinit/reporting/__init__.py | 203 +----------------------------------- cloudinit/reporting/events.py | 210 ++++++++++++++++++++++++++++++++++++++ cloudinit/sources/__init__.py | 4 +- cloudinit/stages.py | 14 +-- tests/unittests/test_reporting.py | 121 +++++++++++----------- 6 files changed, 285 insertions(+), 271 deletions(-) create mode 100644 cloudinit/reporting/events.py (limited to 'cloudinit/sources') diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index edee3887..3e6be203 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -24,7 +24,7 @@ import copy import os from cloudinit import log as logging -from cloudinit import reporting +from cloudinit.reporting import events LOG = logging.getLogger(__name__) @@ -48,7 +48,7 @@ class Cloud(object): self._cfg = cfg self._runners = runners if reporter is None: - reporter = reporting.ReportEventStack( + reporter = events.ReportEventStack( name="unnamed-cloud-reporter", description="unnamed-cloud-reporter", reporting_enabled=False) diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py index 502af95c..6b41ae61 100644 --- a/cloudinit/reporting/__init__.py +++ b/cloudinit/reporting/__init__.py @@ -1,7 +1,6 @@ # Copyright 2015 Canonical Ltd. # This file is part of cloud-init. See LICENCE file for license information. # -# vi: ts=4 expandtab """ cloud-init reporting framework @@ -10,66 +9,13 @@ report events in a structured manner. 
""" from ..registry import DictRegistry -from ..reporting.handlers import available_handlers - - -FINISH_EVENT_TYPE = 'finish' -START_EVENT_TYPE = 'start' +from .handlers import available_handlers DEFAULT_CONFIG = { 'logging': {'type': 'log'}, } -class _nameset(set): - def __getattr__(self, name): - if name in self: - return name - raise AttributeError("%s not a valid value" % name) - - -status = _nameset(("SUCCESS", "WARN", "FAIL")) - - -class ReportingEvent(object): - """Encapsulation of event formatting.""" - - def __init__(self, event_type, name, description): - self.event_type = event_type - self.name = name - self.description = description - - def as_string(self): - """The event represented as a string.""" - return '{0}: {1}: {2}'.format( - self.event_type, self.name, self.description) - - def as_dict(self): - """The event represented as a dictionary.""" - return {'name': self.name, 'description': self.description, - 'event_type': self.event_type} - - -class FinishReportingEvent(ReportingEvent): - - def __init__(self, name, description, result=status.SUCCESS): - super(FinishReportingEvent, self).__init__( - FINISH_EVENT_TYPE, name, description) - self.result = result - if result not in status: - raise ValueError("Invalid result: %s" % result) - - def as_string(self): - return '{0}: {1}: {2}: {3}'.format( - self.event_type, self.name, self.result, self.description) - - def as_dict(self): - """The event represented as json friendly.""" - data = super(FinishReportingEvent, self).as_dict() - data['result'] = self.result - return data - - def update_configuration(config): """Update the instanciated_handler_registry. @@ -90,150 +36,7 @@ def update_configuration(config): instantiated_handler_registry.register_item(handler_name, instance) -def report_event(event): - """Report an event to all registered event handlers. - - This should generally be called via one of the other functions in - the reporting module. - - :param event_type: - The type of the event; this should be a constant from the - reporting module. - """ - for _, handler in instantiated_handler_registry.registered_items.items(): - handler.publish_event(event) - - -def report_finish_event(event_name, event_description, - result=status.SUCCESS): - """Report a "finish" event. - - See :py:func:`.report_event` for parameter details. - """ - event = FinishReportingEvent(event_name, event_description, result) - return report_event(event) - - -def report_start_event(event_name, event_description): - """Report a "start" event. - - :param event_name: - The name of the event; this should be a topic which events would - share (e.g. it will be the same for start and finish events). - - :param event_description: - A human-readable description of the event that has occurred. - """ - event = ReportingEvent(START_EVENT_TYPE, event_name, event_description) - return report_event(event) - - -class ReportEventStack(object): - """Context Manager for using :py:func:`report_event` - - This enables calling :py:func:`report_start_event` and - :py:func:`report_finish_event` through a context manager. - - :param name: - the name of the event - - :param description: - the event's description, passed on to :py:func:`report_start_event` - - :param message: - the description to use for the finish event. defaults to - :param:description. - - :param parent: - :type parent: :py:class:ReportEventStack or None - The parent of this event. The parent is populated with - results of all its children. 
The name used in reporting - is / - - :param reporting_enabled: - Indicates if reporting events should be generated. - If not provided, defaults to the parent's value, or True if no parent - is provided. - - :param result_on_exception: - The result value to set if an exception is caught. default - value is FAIL. - """ - def __init__(self, name, description, message=None, parent=None, - reporting_enabled=None, result_on_exception=status.FAIL): - self.parent = parent - self.name = name - self.description = description - self.message = message - self.result_on_exception = result_on_exception - self.result = status.SUCCESS - - # use parents reporting value if not provided - if reporting_enabled is None: - if parent: - reporting_enabled = parent.reporting_enabled - else: - reporting_enabled = True - self.reporting_enabled = reporting_enabled - - if parent: - self.fullname = '/'.join((parent.fullname, name,)) - else: - self.fullname = self.name - self.children = {} - - def __repr__(self): - return ("ReportEventStack(%s, %s, reporting_enabled=%s)" % - (self.name, self.description, self.reporting_enabled)) - - def __enter__(self): - self.result = status.SUCCESS - if self.reporting_enabled: - report_start_event(self.fullname, self.description) - if self.parent: - self.parent.children[self.name] = (None, None) - return self - - def _childrens_finish_info(self): - for cand_result in (status.FAIL, status.WARN): - for name, (value, msg) in self.children.items(): - if value == cand_result: - return (value, self.message) - return (self.result, self.message) - - @property - def result(self): - return self._result - - @result.setter - def result(self, value): - if value not in status: - raise ValueError("'%s' not a valid result" % value) - self._result = value - - @property - def message(self): - if self._message is not None: - return self._message - return self.description - - @message.setter - def message(self, value): - self._message = value - - def _finish_info(self, exc): - # return tuple of description, and value - if exc: - return (self.result_on_exception, self.message) - return self._childrens_finish_info() - - def __exit__(self, exc_type, exc_value, traceback): - (result, msg) = self._finish_info(exc_value) - if self.parent: - self.parent.children[self.name] = (result, msg) - if self.reporting_enabled: - report_finish_event(self.fullname, msg, result) - - instantiated_handler_registry = DictRegistry() update_configuration(DEFAULT_CONFIG) + +# vi: ts=4 expandtab diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py new file mode 100644 index 00000000..e35e41dd --- /dev/null +++ b/cloudinit/reporting/events.py @@ -0,0 +1,210 @@ +# Copyright 2015 Canonical Ltd. +# This file is part of cloud-init. See LICENCE file for license information. +# +""" +cloud-init events + +Report events in a structured manner. +The events here are most likely used via reporting. +""" + +from . 
import instantiated_handler_registry + +FINISH_EVENT_TYPE = 'finish' +START_EVENT_TYPE = 'start' + + +class _nameset(set): + def __getattr__(self, name): + if name in self: + return name + raise AttributeError("%s not a valid value" % name) + + +status = _nameset(("SUCCESS", "WARN", "FAIL")) + + +class ReportingEvent(object): + """Encapsulation of event formatting.""" + + def __init__(self, event_type, name, description): + self.event_type = event_type + self.name = name + self.description = description + + def as_string(self): + """The event represented as a string.""" + return '{0}: {1}: {2}'.format( + self.event_type, self.name, self.description) + + def as_dict(self): + """The event represented as a dictionary.""" + return {'name': self.name, 'description': self.description, + 'event_type': self.event_type} + + +class FinishReportingEvent(ReportingEvent): + + def __init__(self, name, description, result=status.SUCCESS): + super(FinishReportingEvent, self).__init__( + FINISH_EVENT_TYPE, name, description) + self.result = result + if result not in status: + raise ValueError("Invalid result: %s" % result) + + def as_string(self): + return '{0}: {1}: {2}: {3}'.format( + self.event_type, self.name, self.result, self.description) + + def as_dict(self): + """The event represented as json friendly.""" + data = super(FinishReportingEvent, self).as_dict() + data['result'] = self.result + return data + + +def report_event(event): + """Report an event to all registered event handlers. + + This should generally be called via one of the other functions in + the reporting module. + + :param event_type: + The type of the event; this should be a constant from the + reporting module. + """ + for _, handler in instantiated_handler_registry.registered_items.items(): + handler.publish_event(event) + + +def report_finish_event(event_name, event_description, + result=status.SUCCESS): + """Report a "finish" event. + + See :py:func:`.report_event` for parameter details. + """ + event = FinishReportingEvent(event_name, event_description, result) + return report_event(event) + + +def report_start_event(event_name, event_description): + """Report a "start" event. + + :param event_name: + The name of the event; this should be a topic which events would + share (e.g. it will be the same for start and finish events). + + :param event_description: + A human-readable description of the event that has occurred. + """ + event = ReportingEvent(START_EVENT_TYPE, event_name, event_description) + return report_event(event) + + +class ReportEventStack(object): + """Context Manager for using :py:func:`report_event` + + This enables calling :py:func:`report_start_event` and + :py:func:`report_finish_event` through a context manager. + + :param name: + the name of the event + + :param description: + the event's description, passed on to :py:func:`report_start_event` + + :param message: + the description to use for the finish event. defaults to + :param:description. + + :param parent: + :type parent: :py:class:ReportEventStack or None + The parent of this event. The parent is populated with + results of all its children. The name used in reporting + is / + + :param reporting_enabled: + Indicates if reporting events should be generated. + If not provided, defaults to the parent's value, or True if no parent + is provided. + + :param result_on_exception: + The result value to set if an exception is caught. default + value is FAIL. 
+ """ + def __init__(self, name, description, message=None, parent=None, + reporting_enabled=None, result_on_exception=status.FAIL): + self.parent = parent + self.name = name + self.description = description + self.message = message + self.result_on_exception = result_on_exception + self.result = status.SUCCESS + + # use parents reporting value if not provided + if reporting_enabled is None: + if parent: + reporting_enabled = parent.reporting_enabled + else: + reporting_enabled = True + self.reporting_enabled = reporting_enabled + + if parent: + self.fullname = '/'.join((parent.fullname, name,)) + else: + self.fullname = self.name + self.children = {} + + def __repr__(self): + return ("ReportEventStack(%s, %s, reporting_enabled=%s)" % + (self.name, self.description, self.reporting_enabled)) + + def __enter__(self): + self.result = status.SUCCESS + if self.reporting_enabled: + report_start_event(self.fullname, self.description) + if self.parent: + self.parent.children[self.name] = (None, None) + return self + + def _childrens_finish_info(self): + for cand_result in (status.FAIL, status.WARN): + for name, (value, msg) in self.children.items(): + if value == cand_result: + return (value, self.message) + return (self.result, self.message) + + @property + def result(self): + return self._result + + @result.setter + def result(self, value): + if value not in status: + raise ValueError("'%s' not a valid result" % value) + self._result = value + + @property + def message(self): + if self._message is not None: + return self._message + return self.description + + @message.setter + def message(self, value): + self._message = value + + def _finish_info(self, exc): + # return tuple of description, and value + if exc: + return (self.result_on_exception, self.message) + return self._childrens_finish_info() + + def __exit__(self, exc_type, exc_value, traceback): + (result, msg) = self._finish_info(exc_value) + if self.parent: + self.parent.children[self.name] = (result, msg) + if self.reporting_enabled: + report_finish_event(self.fullname, msg, result) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 838cd198..d3cfa560 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -27,12 +27,12 @@ import six from cloudinit import importer from cloudinit import log as logging -from cloudinit import reporting from cloudinit import type_utils from cloudinit import user_data as ud from cloudinit import util from cloudinit.filters import launch_index +from cloudinit.reporting import events DEP_FILESYSTEM = "FILESYSTEM" DEP_NETWORK = "NETWORK" @@ -254,7 +254,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter): LOG.debug("Searching for %s data source in: %s", mode, ds_names) for name, cls in zip(ds_names, ds_list): - myrep = reporting.ReportEventStack( + myrep = events.ReportEventStack( name="search-%s" % name.replace("DataSource", ""), description="searching for %s data from %s" % (mode, name), message="no %s data found from %s" % (mode, name), diff --git a/cloudinit/stages.py b/cloudinit/stages.py index d300709d..9f192c8d 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -46,7 +46,7 @@ from cloudinit import log as logging from cloudinit import sources from cloudinit import type_utils from cloudinit import util -from cloudinit import reporting +from cloudinit.reporting import events LOG = logging.getLogger(__name__) @@ -67,7 +67,7 @@ class Init(object): self.datasource = NULL_DATA_SOURCE if 
reporter is None: - reporter = reporting.ReportEventStack( + reporter = events.ReportEventStack( name="init-reporter", description="init-desc", reporting_enabled=False) self.reporter = reporter @@ -242,7 +242,7 @@ class Init(object): if self.datasource is not NULL_DATA_SOURCE: return self.datasource - with reporting.ReportEventStack( + with events.ReportEventStack( name="check-cache", description="attempting to read from cache", parent=self.reporter) as myrep: @@ -509,11 +509,11 @@ class Init(object): def consume_data(self, frequency=PER_INSTANCE): # Consume the userdata first, because we need want to let the part # handlers run first (for merging stuff) - with reporting.ReportEventStack( + with events.ReportEventStack( "consume-user-data", "reading and applying user-data", parent=self.reporter): self._consume_userdata(frequency) - with reporting.ReportEventStack( + with events.ReportEventStack( "consume-vendor-data", "reading and applying vendor-data", parent=self.reporter): self._consume_vendordata(frequency) @@ -595,7 +595,7 @@ class Modules(object): # Created on first use self._cached_cfg = None if reporter is None: - reporter = reporting.ReportEventStack( + reporter = events.ReportEventStack( name="module-reporter", description="module-desc", reporting_enabled=False) self.reporter = reporter @@ -710,7 +710,7 @@ class Modules(object): run_name = "config-%s" % (name) desc = "running %s with frequency %s" % (run_name, freq) - myrep = reporting.ReportEventStack( + myrep = events.ReportEventStack( name=run_name, description=desc, parent=self.reporter) with myrep: diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py index 66d4e87e..bb67ef73 100644 --- a/tests/unittests/test_reporting.py +++ b/tests/unittests/test_reporting.py @@ -5,6 +5,7 @@ from cloudinit import reporting from cloudinit.reporting import handlers +from cloudinit.reporting import events from .helpers import (mock, TestCase) @@ -16,12 +17,12 @@ def _fake_registry(): class TestReportStartEvent(TestCase): - @mock.patch('cloudinit.reporting.instantiated_handler_registry', + @mock.patch('cloudinit.reporting.events.instantiated_handler_registry', new_callable=_fake_registry) def test_report_start_event_passes_something_with_as_string_to_handlers( self, instantiated_handler_registry): event_name, event_description = 'my_test_event', 'my description' - reporting.report_start_event(event_name, event_description) + events.report_start_event(event_name, event_description) expected_string_representation = ': '.join( ['start', event_name, event_description]) for _, handler in ( @@ -33,9 +34,9 @@ class TestReportStartEvent(TestCase): class TestReportFinishEvent(TestCase): - def _report_finish_event(self, result=reporting.status.SUCCESS): + def _report_finish_event(self, result=events.status.SUCCESS): event_name, event_description = 'my_test_event', 'my description' - reporting.report_finish_event( + events.report_finish_event( event_name, event_description, result=result) return event_name, event_description @@ -46,39 +47,39 @@ class TestReportFinishEvent(TestCase): event = handler.publish_event.call_args[0][0] self.assertEqual(expected_as_string, event.as_string()) - @mock.patch('cloudinit.reporting.instantiated_handler_registry', + @mock.patch('cloudinit.reporting.events.instantiated_handler_registry', new_callable=_fake_registry) def test_report_finish_event_passes_something_with_as_string_to_handlers( self, instantiated_handler_registry): event_name, event_description = self._report_finish_event() 
expected_string_representation = ': '.join( - ['finish', event_name, reporting.status.SUCCESS, + ['finish', event_name, events.status.SUCCESS, event_description]) self.assertHandlersPassedObjectWithAsString( instantiated_handler_registry.registered_items, expected_string_representation) - @mock.patch('cloudinit.reporting.instantiated_handler_registry', + @mock.patch('cloudinit.reporting.events.instantiated_handler_registry', new_callable=_fake_registry) def test_reporting_successful_finish_has_sensible_string_repr( self, instantiated_handler_registry): event_name, event_description = self._report_finish_event( - result=reporting.status.SUCCESS) + result=events.status.SUCCESS) expected_string_representation = ': '.join( - ['finish', event_name, reporting.status.SUCCESS, + ['finish', event_name, events.status.SUCCESS, event_description]) self.assertHandlersPassedObjectWithAsString( instantiated_handler_registry.registered_items, expected_string_representation) - @mock.patch('cloudinit.reporting.instantiated_handler_registry', + @mock.patch('cloudinit.reporting.events.instantiated_handler_registry', new_callable=_fake_registry) def test_reporting_unsuccessful_finish_has_sensible_string_repr( self, instantiated_handler_registry): event_name, event_description = self._report_finish_event( - result=reporting.status.FAIL) + result=events.status.FAIL) expected_string_representation = ': '.join( - ['finish', event_name, reporting.status.FAIL, event_description]) + ['finish', event_name, events.status.FAIL, event_description]) self.assertHandlersPassedObjectWithAsString( instantiated_handler_registry.registered_items, expected_string_representation) @@ -91,14 +92,14 @@ class TestReportingEvent(TestCase): def test_as_string(self): event_type, name, description = 'test_type', 'test_name', 'test_desc' - event = reporting.ReportingEvent(event_type, name, description) + event = events.ReportingEvent(event_type, name, description) expected_string_representation = ': '.join( [event_type, name, description]) self.assertEqual(expected_string_representation, event.as_string()) def test_as_dict(self): event_type, name, desc = 'test_type', 'test_name', 'test_desc' - event = reporting.ReportingEvent(event_type, name, desc) + event = events.ReportingEvent(event_type, name, desc) self.assertEqual( {'event_type': event_type, 'name': name, 'description': desc}, event.as_dict()) @@ -106,9 +107,9 @@ class TestReportingEvent(TestCase): class TestFinishReportingEvent(TestCase): def test_as_has_result(self): - result = reporting.status.SUCCESS + result = events.status.SUCCESS name, desc = 'test_name', 'test_desc' - event = reporting.FinishReportingEvent(name, desc, result) + event = events.FinishReportingEvent(name, desc, result) ret = event.as_dict() self.assertTrue('result' in ret) self.assertEqual(ret['result'], result) @@ -126,7 +127,7 @@ class TestLogHandler(TestCase): @mock.patch.object(reporting.handlers.logging, 'getLogger') def test_appropriate_logger_used(self, getLogger): event_type, event_name = 'test_type', 'test_name' - event = reporting.ReportingEvent(event_type, event_name, 'description') + event = events.ReportingEvent(event_type, event_name, 'description') reporting.handlers.LogHandler().publish_event(event) self.assertEqual( [mock.call( @@ -135,13 +136,13 @@ class TestLogHandler(TestCase): @mock.patch.object(reporting.handlers.logging, 'getLogger') def test_single_log_message_at_info_published(self, getLogger): - event = reporting.ReportingEvent('type', 'name', 'description') + event = 
events.ReportingEvent('type', 'name', 'description') reporting.handlers.LogHandler().publish_event(event) self.assertEqual(1, getLogger.return_value.log.call_count) @mock.patch.object(reporting.handlers.logging, 'getLogger') def test_log_message_uses_event_as_string(self, getLogger): - event = reporting.ReportingEvent('type', 'name', 'description') + event = events.ReportingEvent('type', 'name', 'description') reporting.handlers.LogHandler(level="INFO").publish_event(event) self.assertIn(event.as_string(), getLogger.return_value.log.call_args[0][1]) @@ -232,49 +233,49 @@ class TestReportingConfiguration(TestCase): class TestReportingEventStack(TestCase): - @mock.patch('cloudinit.reporting.report_finish_event') - @mock.patch('cloudinit.reporting.report_start_event') + @mock.patch('cloudinit.reporting.events.report_finish_event') + @mock.patch('cloudinit.reporting.events.report_start_event') def test_start_and_finish_success(self, report_start, report_finish): - with reporting.ReportEventStack(name="myname", description="mydesc"): + with events.ReportEventStack(name="myname", description="mydesc"): pass self.assertEqual( [mock.call('myname', 'mydesc')], report_start.call_args_list) self.assertEqual( - [mock.call('myname', 'mydesc', reporting.status.SUCCESS)], + [mock.call('myname', 'mydesc', events.status.SUCCESS)], report_finish.call_args_list) - @mock.patch('cloudinit.reporting.report_finish_event') - @mock.patch('cloudinit.reporting.report_start_event') + @mock.patch('cloudinit.reporting.events.report_finish_event') + @mock.patch('cloudinit.reporting.events.report_start_event') def test_finish_exception_defaults_fail(self, report_start, report_finish): name = "myname" desc = "mydesc" try: - with reporting.ReportEventStack(name, description=desc): + with events.ReportEventStack(name, description=desc): raise ValueError("This didnt work") except ValueError: pass self.assertEqual([mock.call(name, desc)], report_start.call_args_list) self.assertEqual( - [mock.call(name, desc, reporting.status.FAIL)], + [mock.call(name, desc, events.status.FAIL)], report_finish.call_args_list) - @mock.patch('cloudinit.reporting.report_finish_event') - @mock.patch('cloudinit.reporting.report_start_event') + @mock.patch('cloudinit.reporting.events.report_finish_event') + @mock.patch('cloudinit.reporting.events.report_start_event') def test_result_on_exception_used(self, report_start, report_finish): name = "myname" desc = "mydesc" try: - with reporting.ReportEventStack( - name, desc, result_on_exception=reporting.status.WARN): + with events.ReportEventStack( + name, desc, result_on_exception=events.status.WARN): raise ValueError("This didnt work") except ValueError: pass self.assertEqual([mock.call(name, desc)], report_start.call_args_list) self.assertEqual( - [mock.call(name, desc, reporting.status.WARN)], + [mock.call(name, desc, events.status.WARN)], report_finish.call_args_list) - @mock.patch('cloudinit.reporting.report_start_event') + @mock.patch('cloudinit.reporting.events.report_start_event') def test_child_fullname_respects_parent(self, report_start): parent_name = "topname" c1_name = "c1name" @@ -282,59 +283,59 @@ class TestReportingEventStack(TestCase): c2_expected_fullname = '/'.join([parent_name, c1_name, c2_name]) c1_expected_fullname = '/'.join([parent_name, c1_name]) - parent = reporting.ReportEventStack(parent_name, "topdesc") - c1 = reporting.ReportEventStack(c1_name, "c1desc", parent=parent) - c2 = reporting.ReportEventStack(c2_name, "c2desc", parent=c1) + parent = 
events.ReportEventStack(parent_name, "topdesc") + c1 = events.ReportEventStack(c1_name, "c1desc", parent=parent) + c2 = events.ReportEventStack(c2_name, "c2desc", parent=c1) with c1: report_start.assert_called_with(c1_expected_fullname, "c1desc") with c2: report_start.assert_called_with(c2_expected_fullname, "c2desc") - @mock.patch('cloudinit.reporting.report_finish_event') - @mock.patch('cloudinit.reporting.report_start_event') + @mock.patch('cloudinit.reporting.events.report_finish_event') + @mock.patch('cloudinit.reporting.events.report_start_event') def test_child_result_bubbles_up(self, report_start, report_finish): - parent = reporting.ReportEventStack("topname", "topdesc") - child = reporting.ReportEventStack("c_name", "c_desc", parent=parent) + parent = events.ReportEventStack("topname", "topdesc") + child = events.ReportEventStack("c_name", "c_desc", parent=parent) with parent: with child: - child.result = reporting.status.WARN + child.result = events.status.WARN report_finish.assert_called_with( - "topname", "topdesc", reporting.status.WARN) + "topname", "topdesc", events.status.WARN) - @mock.patch('cloudinit.reporting.report_finish_event') + @mock.patch('cloudinit.reporting.events.report_finish_event') def test_message_used_in_finish(self, report_finish): - with reporting.ReportEventStack("myname", "mydesc", - message="mymessage"): + with events.ReportEventStack("myname", "mydesc", + message="mymessage"): pass self.assertEqual( - [mock.call("myname", "mymessage", reporting.status.SUCCESS)], + [mock.call("myname", "mymessage", events.status.SUCCESS)], report_finish.call_args_list) - @mock.patch('cloudinit.reporting.report_finish_event') + @mock.patch('cloudinit.reporting.events.report_finish_event') def test_message_updatable(self, report_finish): - with reporting.ReportEventStack("myname", "mydesc") as c: + with events.ReportEventStack("myname", "mydesc") as c: c.message = "all good" self.assertEqual( - [mock.call("myname", "all good", reporting.status.SUCCESS)], + [mock.call("myname", "all good", events.status.SUCCESS)], report_finish.call_args_list) - @mock.patch('cloudinit.reporting.report_start_event') - @mock.patch('cloudinit.reporting.report_finish_event') + @mock.patch('cloudinit.reporting.events.report_start_event') + @mock.patch('cloudinit.reporting.events.report_finish_event') def test_reporting_disabled_does_not_report_events( self, report_start, report_finish): - with reporting.ReportEventStack("a", "b", reporting_enabled=False): + with events.ReportEventStack("a", "b", reporting_enabled=False): pass self.assertEqual(report_start.call_count, 0) self.assertEqual(report_finish.call_count, 0) - @mock.patch('cloudinit.reporting.report_start_event') - @mock.patch('cloudinit.reporting.report_finish_event') + @mock.patch('cloudinit.reporting.events.report_start_event') + @mock.patch('cloudinit.reporting.events.report_finish_event') def test_reporting_child_default_to_parent( self, report_start, report_finish): - parent = reporting.ReportEventStack( + parent = events.ReportEventStack( "pname", "pdesc", reporting_enabled=False) - child = reporting.ReportEventStack("cname", "cdesc", parent=parent) + child = events.ReportEventStack("cname", "cdesc", parent=parent) with parent: with child: pass @@ -343,17 +344,17 @@ class TestReportingEventStack(TestCase): self.assertEqual(report_finish.call_count, 0) def test_reporting_event_has_sane_repr(self): - myrep = reporting.ReportEventStack("fooname", "foodesc", - reporting_enabled=True).__repr__() + myrep = 
events.ReportEventStack("fooname", "foodesc", + reporting_enabled=True).__repr__() self.assertIn("fooname", myrep) self.assertIn("foodesc", myrep) self.assertIn("True", myrep) def test_set_invalid_result_raises_value_error(self): - f = reporting.ReportEventStack("myname", "mydesc") + f = events.ReportEventStack("myname", "mydesc") self.assertRaises(ValueError, setattr, f, "result", "BOGUS") class TestStatusAccess(TestCase): def test_invalid_status_access_raises_value_error(self): - self.assertRaises(AttributeError, getattr, reporting.status, "BOGUS") + self.assertRaises(AttributeError, getattr, events.status, "BOGUS") -- cgit v1.2.3 From 3c39c3f7638245e9581a2e1f4faae2dc2680f0c7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 8 Sep 2015 14:26:30 -0400 Subject: NoCloud: fix consumption of vendor-data the content of vendordata was was being assigned to vendordata, rather than vendordata_raw. The result was that it is not processed for includes or part handlers or other things as it is in other datasources. LP: #1493453 --- ChangeLog | 1 + cloudinit/sources/DataSourceNoCloud.py | 2 +- tests/unittests/test_datasource/test_nocloud.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/ChangeLog b/ChangeLog index 7869ab7e..6fb70696 100644 --- a/ChangeLog +++ b/ChangeLog @@ -60,6 +60,7 @@ - rsyslog: add additional configuration mode (LP: #1478103) - status_wrapper in main: fix use of print_exc when handling exception - reporting: add reporting module for web hook or logging of events. + - NoCloud: fix consumption of vendordata (LP: #1493453) 0.7.6: - open 0.7.6 - Enable vendordata on CloudSigma datasource (LP: #1303986) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 6a861af3..4dffe6e6 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -190,7 +190,7 @@ class DataSourceNoCloud(sources.DataSource): self.seed = ",".join(found) self.metadata = mydata['meta-data'] self.userdata_raw = mydata['user-data'] - self.vendordata = mydata['vendor-data'] + self.vendordata_raw = mydata['vendor-data'] return True LOG.debug("%s: not claiming datasource, dsmode=%s", self, diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 85b4c25a..2d5fc37c 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -121,7 +121,7 @@ class TestNoCloudDataSource(TestCase): ret = dsrc.get_data() self.assertEqual(dsrc.userdata_raw, ud) self.assertEqual(dsrc.metadata, md) - self.assertEqual(dsrc.vendordata, vd) + self.assertEqual(dsrc.vendordata_raw, vd) self.assertTrue(ret) def test_nocloud_no_vendordata(self): -- cgit v1.2.3 From e9e86164198993aca13148872afdeebaae751c2c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 29 Sep 2015 17:17:49 -0400 Subject: MAAS: fix issues with url_helper and oauth module This would cause problems in the event that we actually had a bad clock. We add a retry in the main (for test) also, to ensure that the oauth timestamp fix gets in place. 
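One of the vital fixes below settles the exception_cb contract in url_helper.readurl: a callback that returns a true-ish value stops the retry loop and lets the exception propagate, while returning None keeps retrying. A minimal caller-side sketch under that contract (hypothetical driver code, not part of the patch):

    def give_up_on_404(req_args, exception):
        # a permanent failure: stop retrying and re-raise
        if getattr(exception, 'code', None) == 404:
            return True
        # transient: return None so readurl keeps retrying
        return None

    # url_helper.readurl(url, retries=3, exception_cb=give_up_on_404)
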
LP: #1499869 --- cloudinit/sources/DataSourceMAAS.py | 20 +++++++++++++------- cloudinit/url_helper.py | 8 ++++++-- 2 files changed, 19 insertions(+), 9 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index 6c95c218..cfc59ca5 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -88,6 +88,10 @@ class DataSourceMAAS(sources.DataSource): return False try: + # doing this here actually has a side affect of + # getting oauth time-fix in place. As no where else would + # retry by default, so even if we could fix the timestamp + # we would not. if not self.wait_for_metadata_service(url): return False @@ -95,7 +99,7 @@ class DataSourceMAAS(sources.DataSource): (userdata, metadata) = read_maas_seed_url( self.base_url, read_file_or_url=self.oauth_helper.readurl, - paths=self.paths) + paths=self.paths, retries=1) self.userdata_raw = userdata self.metadata = metadata return True @@ -161,7 +165,7 @@ def read_maas_seed_dir(seed_d): def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, - version=MD_VERSION, paths=None): + version=MD_VERSION, paths=None, retries=None): """ Read the maas datasource at seed_url. read_file_or_url is a method that should provide an interface @@ -193,13 +197,13 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None, for name in file_order: url = files.get(name) if name == 'user-data': - retries = 0 + item_retries = 0 else: - retries = None + item_retries = retries try: ssl_details = util.fetch_ssl_details(paths) - resp = read_file_or_url(url, retries=retries, + resp = read_file_or_url(url, retries=item_retries, timeout=timeout, ssl_details=ssl_details) if resp.ok(): if name in BINARY_FIELDS: @@ -306,7 +310,8 @@ if __name__ == "__main__": oauth_helper = url_helper.OauthUrlHelper(**creds) def geturl(url): - return oauth_helper.readurl(url).contents + # the retry is to ensure that oauth timestamp gets fixed + return oauth_helper.readurl(url, retries=1).contents def printurl(url): print("== %s ==\n%s\n" % (url, geturl(url).decode())) @@ -329,7 +334,8 @@ if __name__ == "__main__": if args.url[0] == "/" or args.url.startswith("file://"): readurl = None (userdata, metadata) = read_maas_seed_url( - args.url, version=args.apiver, read_file_or_url=readurl) + args.url, version=args.apiver, read_file_or_url=readurl, + retries=2) print("=== userdata ===") print(userdata.decode()) print("=== metadata ===") diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index a93847ce..f2e1390e 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -264,7 +264,9 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # ssl exceptions are not going to get fixed by waiting a # few seconds break - if exception_cb and not exception_cb(req_args.copy(), excps[-1]): + if exception_cb and exception_cb(req_args.copy(), excps[-1]): + # if an exception callback was given it should return None + # a true-ish value means to break and re-raise the exception break if i + 1 < manual_tries and sec_between > 0: LOG.debug("Please wait %s seconds while we wait to try again", @@ -404,7 +406,7 @@ class OauthUrlHelper(object): def read_skew_file(self): if self.skew_data_file and os.path.isfile(self.skew_data_file): with open(self.skew_data_file, mode="r") as fp: - return json.load(fp.read()) + return json.load(fp) return None def update_skew_file(self, host, value): @@ -412,6 +414,8 @@ class OauthUrlHelper(object): if not 
self.skew_data_file: return cur = self.read_skew_file() + if cur is None: + cur = {} cur[host] = value with open(self.skew_data_file, mode="w") as fp: fp.write(json.dumps(cur)) -- cgit v1.2.3 From 41900b72f31a1bd0eebe2f58a8598bfab25f0003 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 9 Oct 2015 14:01:11 +0100 Subject: Handle escaped quotes in WALinuxAgentShim.find_endpoint. This fixes bug 1488891. --- cloudinit/sources/helpers/azure.py | 2 +- tests/unittests/test_datasource/test_azure_helper.py | 10 +++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 281d733e..33003da0 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -233,7 +233,7 @@ class WALinuxAgentShim(object): hex_string += hex_pair value = struct.pack('>L', int(hex_string.replace(':', ''), 16)) else: - value = value.encode('utf-8') + value = value.replace('\\', '').encode('utf-8') endpoint_ip_address = socket.inet_ntoa(value) LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return endpoint_ip_address diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index a5228870..68af31cd 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -97,7 +97,8 @@ class TestFindEndpoint(TestCase): if not use_hex: ip_address_repr = struct.pack( '>L', int(ip_address_repr.replace(':', ''), 16)) - ip_address_repr = '"{0}"'.format(ip_address_repr.decode('utf-8')) + ip_address_repr = '"{0}"'.format( + ip_address_repr.decode('utf-8').replace('"', '\\"')) return '\n'.join([ 'lease {', ' interface "eth0";', @@ -125,6 +126,13 @@ class TestFindEndpoint(TestCase): self.assertEqual(ip_address, azure_helper.WALinuxAgentShim.find_endpoint()) + def test_packed_string_with_escaped_quote(self): + ip_address = '100.72.34.108' + file_content = self._build_lease_content(ip_address, use_hex=False) + self.load_file.return_value = file_content + self.assertEqual(ip_address, + azure_helper.WALinuxAgentShim.find_endpoint()) + def test_latest_lease_used(self): ip_addresses = ['4.3.2.1', '98.76.54.32'] file_content = '\n'.join([self._build_lease_content(ip_address) -- cgit v1.2.3 From 20dc4190e27c7778cfa6c2943961f2ad27e14b48 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 9 Oct 2015 14:01:11 +0100 Subject: Handle colons in packed strings in WALinuxAgentShim.find_endpoint. This fixes bug 1488896. 
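The dhclient lease encodes option 245 either as colon-separated hex pairs or as a (possibly escaped) packed string, and a packed string may itself contain a colon, so the previous "':' in value" test misclassifies it; the patch below switches to a length test. A standalone sketch of the resulting logic — the function name is ours, and it assumes the surrounding "option unknown-245 ...;" syntax has already been stripped:

    import socket
    import struct

    def ip_from_lease_value(value):
        unescaped = value.replace('\\', '')  # drop dhclient escapes
        if len(unescaped) > 4:
            # colon-separated hex pairs, e.g. '64:48:3a:6c'
            hex_string = ''.join(p.zfill(2) for p in unescaped.split(':'))
            packed = struct.pack('>L', int(hex_string, 16))
        else:
            # four raw bytes packed straight into a quoted string
            packed = unescaped.encode('utf-8')
        return socket.inet_ntoa(packed)

    print(ip_from_lease_value('64:48:3a:6c'))  # 100.72.58.108
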
--- cloudinit/sources/helpers/azure.py | 12 +++++++----- tests/unittests/test_datasource/test_azure_helper.py | 7 +++++++ 2 files changed, 14 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 33003da0..21b4cd21 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -225,16 +225,18 @@ class WALinuxAgentShim(object): value = line.strip(' ').split(' ', 2)[-1].strip(';\n"') if value is None: raise Exception('No endpoint found in DHCP config.') - if ':' in value: + unescaped_value = value.replace('\\', '') + if len(unescaped_value) > 4: hex_string = '' - for hex_pair in value.split(':'): + for hex_pair in unescaped_value.split(':'): if len(hex_pair) == 1: hex_pair = '0' + hex_pair hex_string += hex_pair - value = struct.pack('>L', int(hex_string.replace(':', ''), 16)) + packed_bytes = struct.pack( + '>L', int(hex_string.replace(':', ''), 16)) else: - value = value.replace('\\', '').encode('utf-8') - endpoint_ip_address = socket.inet_ntoa(value) + packed_bytes = unescaped_value.encode('utf-8') + endpoint_ip_address = socket.inet_ntoa(packed_bytes) LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return endpoint_ip_address diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 68af31cd..5f906837 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -133,6 +133,13 @@ class TestFindEndpoint(TestCase): self.assertEqual(ip_address, azure_helper.WALinuxAgentShim.find_endpoint()) + def test_packed_string_containing_a_colon(self): + ip_address = '100.72.58.108' + file_content = self._build_lease_content(ip_address, use_hex=False) + self.load_file.return_value = file_content + self.assertEqual(ip_address, + azure_helper.WALinuxAgentShim.find_endpoint()) + def test_latest_lease_used(self): ip_addresses = ['4.3.2.1', '98.76.54.32'] file_content = '\n'.join([self._build_lease_content(ip_address) -- cgit v1.2.3 From d78ea2f8191847242b639f23fe085a5dd8b36014 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 9 Oct 2015 14:01:11 +0100 Subject: Refactor WALinuxAgentShim.find_endpoint to use a helper method for IP address unpacking. 
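The scan that feeds this decoder mirrors the loop shown in find_endpoint above; here is a rough illustration against an invented lease fragment (later lease blocks overwrite earlier ones, which is what test_latest_lease_used exercises):

    lease_content = '\n'.join([
        'lease {',
        '  interface "eth0";',
        '  option unknown-245 64:48:22:6c;',
        '}',
    ])

    value = None
    for line in lease_content.splitlines():
        if 'unknown-245' in line:
            value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')

    print(value)  # -> 64:48:22:6c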
--- cloudinit/sources/helpers/azure.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 21b4cd21..fd08be16 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -216,16 +216,8 @@ class WALinuxAgentShim(object): self.openssl_manager.clean_up() @staticmethod - def find_endpoint(): - LOG.debug('Finding Azure endpoint...') - content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases') - value = None - for line in content.splitlines(): - if 'unknown-245' in line: - value = line.strip(' ').split(' ', 2)[-1].strip(';\n"') - if value is None: - raise Exception('No endpoint found in DHCP config.') - unescaped_value = value.replace('\\', '') + def get_ip_from_lease_value(lease_value): + unescaped_value = lease_value.replace('\\', '') if len(unescaped_value) > 4: hex_string = '' for hex_pair in unescaped_value.split(':'): @@ -236,7 +228,19 @@ class WALinuxAgentShim(object): '>L', int(hex_string.replace(':', ''), 16)) else: packed_bytes = unescaped_value.encode('utf-8') - endpoint_ip_address = socket.inet_ntoa(packed_bytes) + return socket.inet_ntoa(packed_bytes) + + @staticmethod + def find_endpoint(): + LOG.debug('Finding Azure endpoint...') + content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases') + value = None + for line in content.splitlines(): + if 'unknown-245' in line: + value = line.strip(' ').split(' ', 2)[-1].strip(';\n"') + if value is None: + raise Exception('No endpoint found in DHCP config.') + endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value) LOG.debug('Azure endpoint found at %s', endpoint_ip_address) return endpoint_ip_address -- cgit v1.2.3 From 92ceca45c5d2983742ce18d2e8b2e671629ef4b0 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Wed, 14 Oct 2015 16:32:35 -0700 Subject: AZURE: support extracting SSH key values from ovf-env.xml Azure has or will be offering shortly the ability to directly define the SSH key value instead of a fingerprint in the ovf-env.xml file. This patch favors defined SSH keys over the fingerprint method (LP: #1506244). 
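The precedence this change introduces is easy to state on its own: a key value supplied directly in ovf-env.xml wins, and only keys without one fall back to the fingerprint-named .crt files in the agent's data dir. A hedged sketch of that rule (the helper name, sample dicts, and the data_dir default are illustrative, not the datasource's actual plumbing):

    import os

    def pubkey_source(pk, data_dir='/var/lib/waagent'):
        # Prefer a directly supplied key value over a fingerprint lookup.
        if pk.get('value', None):
            return ('value', pk['value'])
        return ('crt', os.path.join(data_dir, pk['fingerprint'] + '.crt'))

    print(pubkey_source({'fingerprint': 'fp1', 'path': 'p1',
                         'value': 'ssh-rsa AAAA...'}))  # ('value', ...)
    print(pubkey_source({'fingerprint': 'fp1', 'path': 'p1',
                         'value': ''}))                 # ('crt', .../fp1.crt)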
---
 cloudinit/sources/DataSourceAzure.py          | 16 ++++++---
 tests/unittests/test_datasource/test_azure.py | 52 ++++++++++++++++++++++-----
 2 files changed, 56 insertions(+), 12 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index ff950deb..eb9fd042 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -148,9 +148,15 @@ class DataSourceAzureNet(sources.DataSource):
         wait_for = [shcfgxml]

         fp_files = []
+        key_value = None
         for pk in self.cfg.get('_pubkeys', []):
-            bname = str(pk['fingerprint'] + ".crt")
-            fp_files += [os.path.join(ddir, bname)]
+            if pk.get('value', None):
+                key_value = pk['value']
+                LOG.info("ssh authentication: using value from fabric")
+            else:
+                bname = str(pk['fingerprint'] + ".crt")
+                fp_files += [os.path.join(ddir, bname)]
+                LOG.info("ssh authentication: using fingerprint from fabric")

         missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                                 func=wait_for_files,
@@ -166,7 +172,8 @@ class DataSourceAzureNet(sources.DataSource):
             metadata['instance-id'] = iid_from_shared_config(shcfgxml)
         except ValueError as e:
             LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)
-        metadata['public-keys'] = pubkeys_from_crt_files(fp_files)
+
+        metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
         return metadata

     def get_data(self):
@@ -497,7 +504,8 @@ def load_azure_ovf_pubkeys(sshnode):
     for pk_node in pubkeys:
         if not pk_node.hasChildNodes():
             continue
-        cur = {'fingerprint': "", 'path': ""}
+
+        cur = {'fingerprint': "", 'path': "", 'value': ""}
         for child in pk_node.childNodes:
             if child.nodeType == text_node or not child.localName:
                 continue
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 8952374f..ec0435f5 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -54,10 +54,13 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):

     if pubkeys:
         content += "<SSH><PublicKeys>\n"
-        for fp, path in pubkeys:
+        for fp, path, value in pubkeys:
             content += " <PublicKey>"
-            content += ("<Fingerprint>%s</Fingerprint><Path>%s</Path>" %
-                        (fp, path))
+            if fp and path:
+                content += ("<Fingerprint>%s</Fingerprint><Path>%s</Path>" %
+                            (fp, path))
+            if value:
+                content += "<Value>%s</Value>" % value
             content += "</PublicKey>\n"
         content += "</PublicKeys></SSH>"
     content += """
@@ -297,10 +300,10 @@ class TestAzureDataSource(TestCase):
         self.assertFalse(ret)
         self.assertFalse('agent_invoked' in data)

-    def test_cfg_has_pubkeys(self):
+    def test_cfg_has_pubkeys_fingerprint(self):
         odata = {'HostName': "myhost", 'UserName': "myuser"}
-        mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}]
-        pubkeys = [(x['fingerprint'], x['path']) for x in mypklist]
+        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
+        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
         data = {'ovfcontent': construct_valid_ovf_env(data=odata,
                                                       pubkeys=pubkeys)}

         dsrc = self._get_ds(data)
         ret = dsrc.get_data()
         self.assertTrue(ret)
         for mypk in mypklist:
             self.assertIn(mypk, dsrc.cfg['_pubkeys'])
+            self.assertIn('pubkey_from', dsrc.metadata['public-keys'][-1])
+
+    def test_cfg_has_pubkeys_value(self):
+        # make sure that provided key is used over fingerprint
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': 'value1'}]
+        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata,
+                                                      pubkeys=pubkeys)}
+
+        dsrc = self._get_ds(data)
+        ret
= dsrc.get_data() + self.assertTrue(ret) + + for mypk in mypklist: + self.assertIn(mypk, dsrc.cfg['_pubkeys']) + self.assertIn(mypk['value'], dsrc.metadata['public-keys']) + + def test_cfg_has_no_fingerprint_has_value(self): + # test value is used when fingerprint not provided + odata = {'HostName': "myhost", 'UserName': "myuser"} + mypklist = [{'fingerprint': None, 'path': 'path1', 'value': 'value1'}] + pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] + data = {'ovfcontent': construct_valid_ovf_env(data=odata, + pubkeys=pubkeys)} + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + + for mypk in mypklist: + self.assertIn(mypk['value'], dsrc.metadata['public-keys']) + def test_default_ephemeral(self): # make sure the ephemeral device works @@ -642,8 +678,8 @@ class TestReadAzureOvf(TestCase): DataSourceAzure.read_azure_ovf, invalid_xml) def test_load_with_pubkeys(self): - mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}] - pubkeys = [(x['fingerprint'], x['path']) for x in mypklist] + mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] + pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist] content = construct_valid_ovf_env(pubkeys=pubkeys) (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content) for mypk in mypklist: -- cgit v1.2.3 From c67d5c7e5e6c96f6cd4c2587110f592089f327bd Mon Sep 17 00:00:00 2001 From: Darren Worrall Date: Tue, 20 Oct 2015 09:44:50 +0100 Subject: Remove --quiet option from udevadm in AltCloud --quiet is no longer supported LP: #1507526 --- cloudinit/sources/DataSourceAltCloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py index fb528ae5..60d58d6d 100644 --- a/cloudinit/sources/DataSourceAltCloud.py +++ b/cloudinit/sources/DataSourceAltCloud.py @@ -41,7 +41,7 @@ CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info' # Shell command lists CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy'] -CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5'] +CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--timeout=5'] META_DATA_NOT_SUPPORTED = { 'block-device-mapping': {}, -- cgit v1.2.3 From 34b208a05361ae6ab4a51a6a999c9ac4ab77f06a Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Fri, 30 Oct 2015 16:26:31 +0000 Subject: Use DMI data to find Azure instance IDs. This replaces the use of SharedConfig.xml in both the walinuxagent case, and the case where we communicate with the Azure fabric ourselves. 
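On Linux, DMI values of the kind util.read_dmi_data returns are typically exposed under /sys/class/dmi/id. A rough sketch of what such a sysfs-backed read looks like; the key-to-attribute mapping below is an assumption for illustration and may not match the helper's actual table or its fallbacks:

    def read_dmi_sysfs(key, base='/sys/class/dmi/id'):
        # Assumed mapping from dmidecode-style key names to the sysfs
        # attributes the kernel exposes; returns None when unreadable.
        mapping = {
            'system-uuid': 'product_uuid',
            'system-product-name': 'product_name',
        }
        try:
            with open('%s/%s' % (base, mapping[key]), 'r') as fp:
                return fp.read().strip()
        except (IOError, OSError):
            return None

Reading the UUID this way sidesteps SharedConfig.xml entirely, which is why the file and its parser can be removed below.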
--- cloudinit/sources/DataSourceAzure.py | 38 +---------- cloudinit/sources/helpers/azure.py | 21 ------ tests/unittests/test_datasource/test_azure.py | 77 +++++----------------- .../unittests/test_datasource/test_azure_helper.py | 42 +----------- 4 files changed, 23 insertions(+), 155 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index c6228e6c..bd80a8a6 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -31,8 +31,7 @@ from cloudinit import log as logging from cloudinit.settings import PER_ALWAYS from cloudinit import sources from cloudinit import util -from cloudinit.sources.helpers.azure import ( - get_metadata_from_fabric, iid_from_shared_config_content) +from cloudinit.sources.helpers.azure import get_metadata_from_fabric LOG = logging.getLogger(__name__) @@ -41,7 +40,6 @@ DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] BOUNCE_COMMAND = ['sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"] -DATA_DIR_CLEAN_LIST = ['SharedConfig.xml'] BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START, @@ -144,8 +142,6 @@ class DataSourceAzureNet(sources.DataSource): self.ds_cfg['agent_command']) ddir = self.ds_cfg['data_dir'] - shcfgxml = os.path.join(ddir, "SharedConfig.xml") - wait_for = [shcfgxml] fp_files = [] key_value = None @@ -160,19 +156,11 @@ class DataSourceAzureNet(sources.DataSource): missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", func=wait_for_files, - args=(wait_for + fp_files,)) + args=(fp_files,)) if len(missing): LOG.warn("Did not find files, but going on: %s", missing) metadata = {} - if shcfgxml in missing: - LOG.warn("SharedConfig.xml missing, using static instance-id") - else: - try: - metadata['instance-id'] = iid_from_shared_config(shcfgxml) - except ValueError as e: - LOG.warn("failed to get instance id in %s: %s", shcfgxml, e) - metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files) return metadata @@ -229,21 +217,6 @@ class DataSourceAzureNet(sources.DataSource): user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) - if found != ddir: - cached_ovfenv = util.load_file( - os.path.join(ddir, 'ovf-env.xml'), quiet=True, decode=False) - if cached_ovfenv != files['ovf-env.xml']: - # source was not walinux-agent's datadir, so we have to clean - # up so 'wait_for_files' doesn't return early due to stale data - cleaned = [] - for f in [os.path.join(ddir, f) for f in DATA_DIR_CLEAN_LIST]: - if os.path.exists(f): - util.del_file(f) - cleaned.append(f) - if cleaned: - LOG.info("removed stale file(s) in '%s': %s", - ddir, str(cleaned)) - # walinux agent writes files world readable, but expects # the directory to be protected. 
write_files(ddir, files, dirmode=0o700) @@ -259,6 +232,7 @@ class DataSourceAzureNet(sources.DataSource): " on Azure.", exc_info=True) return False + self.metadata['instance-id'] = util.read_dmi_data('system-uuid') self.metadata.update(fabric_data) found_ephemeral = find_fabric_formatted_ephemeral_disk() @@ -649,12 +623,6 @@ def load_azure_ds_dir(source_dir): return (md, ud, cfg, {'ovf-env.xml': contents}) -def iid_from_shared_config(path): - with open(path, "rb") as fp: - content = fp.read() - return iid_from_shared_config_content(content) - - class BrokenAzureDataSource(Exception): pass diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py index 281d733e..d90c22fd 100644 --- a/cloudinit/sources/helpers/azure.py +++ b/cloudinit/sources/helpers/azure.py @@ -78,12 +78,6 @@ class GoalState(object): return self._text_from_xpath( './Container/RoleInstanceList/RoleInstance/InstanceId') - @property - def shared_config_xml(self): - url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance' - '/Configuration/SharedConfig') - return self.http_client.get(url).contents - @property def certificates_xml(self): if self._certificates_xml is None: @@ -172,19 +166,6 @@ class OpenSSLManager(object): return keys -def iid_from_shared_config_content(content): - """ - find INSTANCE_ID in: - - - - - """ - root = ElementTree.fromstring(content) - depnode = root.find('Deployment') - return depnode.get('name') - - class WALinuxAgentShim(object): REPORT_READY_XML_TEMPLATE = '\n'.join([ @@ -263,8 +244,6 @@ class WALinuxAgentShim(object): public_keys = self.openssl_manager.parse_certificates( goal_state.certificates_xml) data = { - 'instance-id': iid_from_shared_config_content( - goal_state.shared_config_xml), 'public-keys': public_keys, } self._report_ready(goal_state, http_client) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index ec0435f5..3933794f 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -115,10 +115,6 @@ class TestAzureDataSource(TestCase): data['pubkey_files'] = flist return ["pubkey_from: %s" % f for f in flist] - def _iid_from_shared_config(path): - data['iid_from_shared_cfg'] = path - return 'i-my-azure-id' - if data.get('ovfcontent') is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': data['ovfcontent']}) @@ -127,20 +123,22 @@ class TestAzureDataSource(TestCase): mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d self.get_metadata_from_fabric = mock.MagicMock(return_value={ - 'instance-id': 'i-my-azure-id', 'public-keys': [], }) + self.instance_id = 'test-instance-id' + self.apply_patches([ (mod, 'list_possible_azure_ds_devs', dsdevs), (mod, 'invoke_agent', _invoke_agent), (mod, 'wait_for_files', _wait_for_files), (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files), - (mod, 'iid_from_shared_config', _iid_from_shared_config), (mod, 'perform_hostname_bounce', mock.MagicMock()), (mod, 'get_hostname', mock.MagicMock()), (mod, 'set_hostname', mock.MagicMock()), (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric), + (mod.util, 'read_dmi_data', mock.MagicMock( + return_value=self.instance_id)), ]) dsrc = mod.DataSourceAzureNet( @@ -193,7 +191,6 @@ class TestAzureDataSource(TestCase): self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName']) self.assertTrue(os.path.isfile( os.path.join(self.waagent_d, 'ovf-env.xml'))) - self.assertEqual(dsrc.metadata['instance-id'], 
'i-my-azure-id') def test_waagent_d_has_0700_perms(self): # we expect /var/lib/waagent to be created 0700 @@ -345,7 +342,6 @@ class TestAzureDataSource(TestCase): for mypk in mypklist: self.assertIn(mypk['value'], dsrc.metadata['public-keys']) - def test_default_ephemeral(self): # make sure the ephemeral device works odata = {} @@ -434,54 +430,6 @@ class TestAzureDataSource(TestCase): dsrc = self._get_ds({'ovfcontent': xml}) dsrc.get_data() - def test_existing_ovf_same(self): - # waagent/SharedConfig left alone if found ovf-env.xml same as cached - odata = {'UserData': b64e("SOMEUSERDATA")} - data = {'ovfcontent': construct_valid_ovf_env(data=odata)} - - populate_dir(self.waagent_d, - {'ovf-env.xml': data['ovfcontent'], - 'otherfile': 'otherfile-content', - 'SharedConfig.xml': 'mysharedconfig'}) - - dsrc = self._get_ds(data) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertTrue(os.path.exists( - os.path.join(self.waagent_d, 'ovf-env.xml'))) - self.assertTrue(os.path.exists( - os.path.join(self.waagent_d, 'otherfile'))) - self.assertTrue(os.path.exists( - os.path.join(self.waagent_d, 'SharedConfig.xml'))) - - def test_existing_ovf_diff(self): - # waagent/SharedConfig must be removed if ovfenv is found elsewhere - - # 'get_data' should remove SharedConfig.xml in /var/lib/waagent - # if ovf-env.xml differs. - cached_ovfenv = construct_valid_ovf_env( - {'userdata': b64e("FOO_USERDATA")}) - new_ovfenv = construct_valid_ovf_env( - {'userdata': b64e("NEW_USERDATA")}) - - populate_dir(self.waagent_d, - {'ovf-env.xml': cached_ovfenv, - 'SharedConfig.xml': "mysharedconfigxml", - 'otherfile': 'otherfilecontent'}) - - dsrc = self._get_ds({'ovfcontent': new_ovfenv}) - ret = dsrc.get_data() - self.assertTrue(ret) - self.assertEqual(dsrc.userdata_raw, b"NEW_USERDATA") - self.assertTrue(os.path.exists( - os.path.join(self.waagent_d, 'otherfile'))) - self.assertFalse(os.path.exists( - os.path.join(self.waagent_d, 'SharedConfig.xml'))) - self.assertTrue(os.path.exists( - os.path.join(self.waagent_d, 'ovf-env.xml'))) - new_xml = load_file(os.path.join(self.waagent_d, 'ovf-env.xml')) - self.xml_equals(new_ovfenv, new_xml) - def test_exception_fetching_fabric_data_doesnt_propagate(self): ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) ds.ds_cfg['agent_command'] = '__builtin__' @@ -496,6 +444,17 @@ class TestAzureDataSource(TestCase): self.assertTrue(ret) self.assertEqual('value', ds.metadata['test']) + def test_instance_id_from_dmidecode_used(self): + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + ds.get_data() + self.assertEqual(self.instance_id, ds.metadata['instance-id']) + + def test_instance_id_from_dmidecode_used_for_builtin(self): + ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + ds.ds_cfg['agent_command'] = '__builtin__' + ds.get_data() + self.assertEqual(self.instance_id, ds.metadata['instance-id']) + class TestAzureBounce(TestCase): @@ -504,9 +463,6 @@ class TestAzureBounce(TestCase): mock.patch.object(DataSourceAzure, 'invoke_agent')) self.patches.enter_context( mock.patch.object(DataSourceAzure, 'wait_for_files')) - self.patches.enter_context( - mock.patch.object(DataSourceAzure, 'iid_from_shared_config', - mock.MagicMock(return_value='i-my-azure-id'))) self.patches.enter_context( mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs', mock.MagicMock(return_value=[]))) @@ -521,6 +477,9 @@ class TestAzureBounce(TestCase): self.patches.enter_context( mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric', 
mock.MagicMock(return_value={}))) + self.patches.enter_context( + mock.patch.object(DataSourceAzure.util, 'read_dmi_data', + mock.MagicMock(return_value='test-instance-id'))) def setUp(self): super(TestAzureBounce, self).setUp() diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index a5228870..0638c974 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -40,7 +40,7 @@ GOAL_STATE_TEMPLATE = """\ http://100.86.192.70:80/...hostingEnvironmentConfig... - {shared_config_url} + http://100.86.192.70:80/..SharedConfig.. http://100.86.192.70:80/...extensionsConfig... @@ -55,21 +55,6 @@ GOAL_STATE_TEMPLATE = """\ """ -class TestReadAzureSharedConfig(unittest.TestCase): - - def test_valid_content(self): - xml = """ - - - - - - - """ - ret = azure_helper.iid_from_shared_config_content(xml) - self.assertEqual("MY_INSTANCE_ID", ret) - - class TestFindEndpoint(TestCase): def setUp(self): @@ -140,7 +125,6 @@ class TestGoalStateParsing(TestCase): 'incarnation': 1, 'container_id': 'MyContainerId', 'instance_id': 'MyInstanceId', - 'shared_config_url': 'MySharedConfigUrl', 'certificates_url': 'MyCertificatesUrl', } @@ -174,20 +158,9 @@ class TestGoalStateParsing(TestCase): goal_state = self._get_goal_state(instance_id=instance_id) self.assertEqual(instance_id, goal_state.instance_id) - def test_shared_config_xml_parsed_and_fetched_correctly(self): - http_client = mock.MagicMock() - shared_config_url = 'TestSharedConfigUrl' - goal_state = self._get_goal_state( - http_client=http_client, shared_config_url=shared_config_url) - shared_config_xml = goal_state.shared_config_xml - self.assertEqual(1, http_client.get.call_count) - self.assertEqual(shared_config_url, http_client.get.call_args[0][0]) - self.assertEqual(http_client.get.return_value.contents, - shared_config_xml) - def test_certificates_xml_parsed_and_fetched_correctly(self): http_client = mock.MagicMock() - certificates_url = 'TestSharedConfigUrl' + certificates_url = 'TestCertificatesUrl' goal_state = self._get_goal_state( http_client=http_client, certificates_url=certificates_url) certificates_xml = goal_state.certificates_xml @@ -324,8 +297,6 @@ class TestWALinuxAgentShim(TestCase): azure_helper.WALinuxAgentShim, 'find_endpoint')) self.GoalState = patches.enter_context( mock.patch.object(azure_helper, 'GoalState')) - self.iid_from_shared_config_content = patches.enter_context( - mock.patch.object(azure_helper, 'iid_from_shared_config_content')) self.OpenSSLManager = patches.enter_context( mock.patch.object(azure_helper, 'OpenSSLManager')) patches.enter_context( @@ -367,15 +338,6 @@ class TestWALinuxAgentShim(TestCase): data = shim.register_with_azure_and_fetch_data() self.assertEqual([], data['public-keys']) - def test_instance_id_returned_in_data(self): - shim = azure_helper.WALinuxAgentShim() - data = shim.register_with_azure_and_fetch_data() - self.assertEqual( - [mock.call(self.GoalState.return_value.shared_config_xml)], - self.iid_from_shared_config_content.call_args_list) - self.assertEqual(self.iid_from_shared_config_content.return_value, - data['instance-id']) - def test_correct_url_used_for_report_ready(self): self.find_endpoint.return_value = 'test_endpoint' shim = azure_helper.WALinuxAgentShim() -- cgit v1.2.3 From 8844ffb5988bcfbb8cfbe57d9139c3dcb8b429cc Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Wed, 18 Nov 2015 16:03:15 -0800 Subject: Add Image Customization Parser for VMware 
vSphere Hypervisor Support. This is the first changeset submitted as a part of project to add cloud-init support for VMware vSphere Hypervisor. This changeset contains _only_ the changes for a simple python parser for a Image Customization Specification file pushed by VMware vSphere hypervisor into the guest VMs. In a later changeset, will be submitting another patch to actually detect the underlying VMware vSphere hypervisor and do the necessary customization. --- cloudinit/sources/helpers/vmware/__init__.py | 13 ++ cloudinit/sources/helpers/vmware/imc/__init__.py | 13 ++ cloudinit/sources/helpers/vmware/imc/boot_proto.py | 11 + cloudinit/sources/helpers/vmware/imc/config.py | 125 ++++++++++++ .../sources/helpers/vmware/imc/config_file.py | 221 +++++++++++++++++++++ .../sources/helpers/vmware/imc/config_namespace.py | 5 + .../sources/helpers/vmware/imc/config_source.py | 2 + cloudinit/sources/helpers/vmware/imc/ipv4_mode.py | 29 +++ cloudinit/sources/helpers/vmware/imc/nic.py | 107 ++++++++++ 9 files changed, 526 insertions(+) create mode 100644 cloudinit/sources/helpers/vmware/__init__.py create mode 100644 cloudinit/sources/helpers/vmware/imc/__init__.py create mode 100644 cloudinit/sources/helpers/vmware/imc/boot_proto.py create mode 100644 cloudinit/sources/helpers/vmware/imc/config.py create mode 100644 cloudinit/sources/helpers/vmware/imc/config_file.py create mode 100644 cloudinit/sources/helpers/vmware/imc/config_namespace.py create mode 100644 cloudinit/sources/helpers/vmware/imc/config_source.py create mode 100644 cloudinit/sources/helpers/vmware/imc/ipv4_mode.py create mode 100644 cloudinit/sources/helpers/vmware/imc/nic.py (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/vmware/__init__.py b/cloudinit/sources/helpers/vmware/__init__.py new file mode 100644 index 00000000..386225d5 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/__init__.py @@ -0,0 +1,13 @@ +# vi: ts=4 expandtab +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . diff --git a/cloudinit/sources/helpers/vmware/imc/__init__.py b/cloudinit/sources/helpers/vmware/imc/__init__.py new file mode 100644 index 00000000..386225d5 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/__init__.py @@ -0,0 +1,13 @@ +# vi: ts=4 expandtab +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py new file mode 100644 index 00000000..6c3b070a --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py @@ -0,0 +1,11 @@ +# from enum import Enum + +class BootProto: + DHCP = 'dhcp' + STATIC = 'static' + +# def __eq__(self, other): +# return self.name == other.name and self.value == other.value +# +# def __ne__(self, other): +# return not self.__eq__(other) diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py new file mode 100644 index 00000000..ea0873fb --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config.py @@ -0,0 +1,125 @@ +from cloudinit.sources.helpers.vmware.imc.nic import Nic + + +class Config: + DNS = 'DNS|NAMESERVER|' + SUFFIX = 'DNS|SUFFIX|' + PASS = 'PASSWORD|-PASS' + TIMEZONE = 'DATETIME|TIMEZONE' + UTC = 'DATETIME|UTC' + HOSTNAME = 'NETWORK|HOSTNAME' + OMAINNAME = 'NETWORK|DOMAINNAME' + + def __init__(self, configFile): + self._configFile = configFile + + # Retrieves hostname. + # + # Args: + # None + # Results: + # string: hostname + # Throws: + # None + @property + def hostName(self): + return self._configFile.get(Config.HOSTNAME, None) + + # Retrieves domainName. + # + # Args: + # None + # Results: + # string: domainName + # Throws: + # None + @property + def domainName(self): + return self._configFile.get(Config.DOMAINNAME, None) + + # Retrieves timezone. + # + # Args: + # None + # Results: + # string: timezone + # Throws: + # None + @property + def timeZone(self): + return self._configFile.get(Config.TIMEZONE, None) + + # Retrieves whether to set time to UTC or Local. + # + # Args: + # None + # Results: + # boolean: True for yes/YES, True for no/NO, otherwise - None + # Throws: + # None + @property + def utc(self): + return self._configFile.get(Config.UTC, None) + + # Retrieves root password to be set. + # + # Args: + # None + # Results: + # string: base64-encoded root password or None + # Throws: + # None + @property + def adminPassword(self): + return self._configFile.get(Config.PASS, None) + + # Retrieves DNS Servers. + # + # Args: + # None + # Results: + # integer: count or 0 + # Throws: + # None + @property + def nameServers(self): + res = [] + for i in range(1, self._configFile.getCnt(Config.DNS) + 1): + key = Config.DNS + str(i) + res.append(self._configFile[key]) + + return res + + # Retrieves DNS Suffixes. + # + # Args: + # None + # Results: + # integer: count or 0 + # Throws: + # None + @property + def dnsSuffixes(self): + res = [] + for i in range(1, self._configFile.getCnt(Config.SUFFIX) + 1): + key = Config.SUFFIX + str(i) + res.append(self._configFile[key]) + + return res + + # Retrieves NICs. 
+ # + # Args: + # None + # Results: + # integer: count + # Throws: + # None + @property + def nics(self): + res = [] + nics = self._configFile['NIC-CONFIG|NICS'] + for nic in nics.split(','): + res.append(Nic(nic, self._configFile)) + + return res diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py new file mode 100644 index 00000000..3f9938da --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_file.py @@ -0,0 +1,221 @@ +import logging +import re + +from cloudinit.sources.helpers.vmware.imc.config_source import ConfigSource + +logger = logging.getLogger(__name__) + + +class ConfigFile(ConfigSource): + def __init__(self): + self._configData = {} + + def __getitem__(self, key): + return self._configData[key] + + def get(self, key, default=None): + return self._configData.get(key, default) + + # Removes all the properties. + # + # Args: + # None + # Results: + # None + # Throws: + # None + def clear(self): + self._configData.clear() + + # Inserts k/v pair. + # + # Does not do any key/cross-key validation. + # + # Args: + # key: string: key + # val: string: value + # Results: + # None + # Throws: + # None + def _insertKey(self, key, val): + # cleaning up on all "input" path + + # remove end char \n (chomp) + key = key.strip() + val = val.strip() + + if key.startswith('-') or '|-' in key: + canLog = 0 + else: + canLog = 1 + + # "sensitive" settings shall not be logged + if canLog: + logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val)) + else: + logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key) + + self._configData[key] = val + + # Determines properties count. + # + # Args: + # None + # Results: + # integer: properties count + # Throws: + # None + def size(self): + return len(self._configData) + + # Parses properties from a .cfg file content. + # + # Any previously available properties will be removed. + # + # Sensitive data will not be logged in case key starts from '-'. + # + # Args: + # content: string: e.g. content of config/cust.cfg + # Results: + # None + # Throws: + # None + def loadConfigContent(self, content): + self.clear() + + # remove end char \n (chomp) + for line in content.split('\n'): + # TODO validate against allowed characters (not done in Perl) + + # spaces at the end are not allowed, things like passwords must be + # at least base64-encoded + line = line.strip() + + # "sensitive" settings shall not be logged + if line.startswith('-'): + canLog = 0 + else: + canLog = 1 + + if canLog: + logger.debug("Processing line: '%s'" % line) + else: + logger.debug("Processing line: '***********************'") + + if not line: + logger.debug("Empty line. Ignored.") + continue + + if line.startswith('#'): + logger.debug("Comment found. Line ignored.") + continue + + matchObj = re.match(r'\[(.+)\]', line) + if matchObj: + category = matchObj.group(1) + logger.debug("FOUND CATEGORY = '%s'" % category) + else: + # POSIX.2 regex doesn't support non-greedy like in (.+?)=(.*) + # key value pair (non-eager '=' for base64) + matchObj = re.match(r'([^=]+)=(.*)', line) + if matchObj: + # cleaning up on all "input" paths + key = category + "|" + matchObj.group(1).strip() + val = matchObj.group(2).strip() + + self._insertKey(key, val) + else: + # TODO document + raise Exception("Unrecognizable line: '%s'" % line) + + self.validate() + + # Parses properties from a .cfg file + # + # Any previously available properties will be removed. + # + # Sensitive data will not be logged in case key starts from '-'. 
+ # + # Args: + # filename: string: full path to a .cfg file + # Results: + # None + # Throws: + # None + def loadConfigFile(self, filename): + logger.info("Opening file name %s." % filename) + # TODO what throws? + with open(filename, "r") as myfile: + self.loadConfigContent(myfile.read()) + + # Determines whether a property with a given key exists. + # + # Args: + # key: string: key + # Results: + # boolean: True if such property exists, otherwise - False. + # Throws: + # None + def hasKey(self, key): + return key in self._configData + + # Determines whether a value for a property must be kept. + # + # If the property is missing, it's treated as it should be not changed by + # the engine. + # + # Args: + # key: string: key + # Results: + # boolean: True if property must be kept, otherwise - False. + # Throws: + # None + def keepCurrentValue(self, key): + # helps to distinguish from "empty" value which is used to indicate + # "removal" + return not self.hasKey(key) + + # Determines whether a value for a property must be removed. + # + # If the property is empty, it's treated as it should be removed by the + # engine. + # + # Args: + # key: string: key + # Results: + # boolean: True if property must be removed, otherwise - False. + # Throws: + # None + def removeCurrentValue(self, key): + # helps to distinguish from "missing" value which is used to indicate + # "keeping unchanged" + if self.hasKey(key): + return not bool(self._configData[key]) + else: + return False + + # TODO + def getCnt(self, prefix): + res = 0 + for key in self._configData.keys(): + if key.startswith(prefix): + res += 1 + + return res + + # TODO + # TODO pass base64 + # Throws: + # Dies in case timezone is present but empty. + # Dies in case password is present but empty. + # Dies in case hostname is present but empty or greater than 63 chars. + # Dies in case UTC is present, but is not yes/YES or no/NO. + # Dies in case NICS is not present. + def validate(self): + # TODO must log all the errors + keyValidators = {'NIC1|IPv6GATEWAY|': None} + crossValidators = {} + + for key in self._configData.keys(): + pass diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py new file mode 100644 index 00000000..7f76ac8b --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py @@ -0,0 +1,5 @@ +from cloudinit.sources.helpers.vmware.imc.config_source import ConfigSource + + +class ConfigNamespace(ConfigSource): + pass diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py new file mode 100644 index 00000000..fad3a389 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_source.py @@ -0,0 +1,2 @@ +class ConfigSource: + pass diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py new file mode 100644 index 00000000..66b4fad7 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py @@ -0,0 +1,29 @@ +# from enum import Enum + + +# The IPv4 configuration mode which directly represents the user's goal. +# +# This mode effectively acts as a contract of the inguest customization engine. +# It must be set based on what the user has requested via VMODL/generators API +# and should not be changed by those layers. It's up to the in-guest engine to +# interpret and materialize the user's request. +# +# Also defined in linuxconfiggenerator.h. 
+class Ipv4Mode: + # The legacy mode which only allows dhcp/static based on whether IPv4 + # addresses list is empty or not + IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE' + # IPv4 must use static address. Reserved for future use + IPV4_MODE_STATIC = 'STATIC' + # IPv4 must use DHCPv4. Reserved for future use + IPV4_MODE_DHCP = 'DHCP' + # IPv4 must be disabled + IPV4_MODE_DISABLED = 'DISABLED' + # IPv4 settings should be left untouched. Reserved for future use + IPV4_MODE_AS_IS = 'AS_IS' + + # def __eq__(self, other): + # return self.name == other.name and self.value == other.value + # + # def __ne__(self, other): + # return not self.__eq__(other) diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py new file mode 100644 index 00000000..b90a5640 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/nic.py @@ -0,0 +1,107 @@ +from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProto + + +class Nic: + def __init__(self, name, configFile): + self._name = name + self._configFile = configFile + + def _get(self, what): + return self._configFile.get(self.name + what, None) + + def _getCnt(self, prefix): + return self._configFile.getCnt(self.name + prefix) + + @property + def name(self): + return self._name + + @property + def mac(self): + return self._get('|MACADDR').lower() + + @property + def bootProto(self): + return self._get('|BOOTPROTO').lower() + + @property + def ipv4(self): + # TODO implement NONE + if self.bootProto == BootProto.STATIC: + return StaticIpv4Conf(self) + + return DhcpIpv4Conf(self) + + @property + def ipv6(self): + # TODO implement NONE + cnt = self._getCnt("|IPv6ADDR|") + + if cnt != 0: + return StaticIpv6Conf(self) + + return DhcpIpv6Conf(self) + + +class DhcpIpv4Conf: + def __init__(self, nic): + self._nic = nic + + +class StaticIpv4Addr: + def __init__(self, nic): + self._nic = nic + + @property + def ip(self): + return self._nic._get('|IPADDR') + + @property + def netmask(self): + return self._nic._get('|NETMASK') + + @property + def gateway(self): + return self._nic._get('|GATEWAY') + + +class StaticIpv4Conf(DhcpIpv4Conf): + @property + def addrs(self): + return [StaticIpv4Addr(self._nic)] + + +class DhcpIpv6Conf: + def __init__(self, nic): + self._nic = nic + + +class StaticIpv6Addr: + def __init__(self, nic, index): + self._nic = nic + self._index = index + + @property + def ip(self): + return self._nic._get("|IPv6ADDR|" + str(self._index)) + + @property + def prefix(self): + return self._nic._get("|IPv6NETMASK|" + str(self._index)) + + @property + def gateway(self): + return self._nic._get("|IPv6GATEWAY|" + str(self._index)) + + +class StaticIpv6Conf(DhcpIpv6Conf): + @property + def addrs(self): + cnt = self._nic._getCnt("|IPv6ADDR|") + + res = [] + + for i in range(1, cnt + 1): + res.append(StaticIpv6Addr(self._nic, i)) + + return res -- cgit v1.2.3 From 8d9e5bd7fcda8f56a4fe087150db1456af738335 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Tue, 5 Jan 2016 12:05:11 -0800 Subject: Fixed all the styling nits. Used proper naming convention for the methods. Added proper documentation. Checked pep8 and flake8 output and no issues were reported. 
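One detail worth noting before the restyle: the initial version above declares the domain-name constant as OMAINNAME while the domainName property reads Config.DOMAINNAME, so that property would raise AttributeError until this commit corrects the spelling. A minimal usage sketch of the parser as first landed (the cust.cfg path is hypothetical; the snake_case names introduced below rename these accessors):

    from cloudinit.sources.helpers.vmware.imc.config import Config
    from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile

    cf = ConfigFile()
    cf.loadConfigFile('/tmp/cust.cfg')   # hypothetical path
    conf = Config(cf)
    print(conf.hostName)                 # looks up 'NETWORK|HOSTNAME'
    for nic in conf.nics:                # driven by 'NIC-CONFIG|NICS'
        print(nic.name, nic.mac, nic.bootProto)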
--- cloudinit/sources/helpers/vmware/imc/boot_proto.py | 28 +- cloudinit/sources/helpers/vmware/imc/config.py | 116 +++---- .../sources/helpers/vmware/imc/config_file.py | 372 +++++++++------------ .../sources/helpers/vmware/imc/config_namespace.py | 22 +- .../sources/helpers/vmware/imc/config_source.py | 21 ++ cloudinit/sources/helpers/vmware/imc/ipv4_mode.py | 74 ++-- cloudinit/sources/helpers/vmware/imc/nic.py | 254 ++++++++------ 7 files changed, 448 insertions(+), 439 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py index 6c3b070a..abfffd75 100644 --- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py +++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py @@ -1,11 +1,25 @@ -# from enum import Enum +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2015 VMware Inc. +# +# Author: Sankar Tanguturi +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + class BootProto: + """Specifies the NIC Boot Settings.""" + DHCP = 'dhcp' STATIC = 'static' - -# def __eq__(self, other): -# return self.name == other.name and self.value == other.value -# -# def __ne__(self, other): -# return not self.__eq__(other) diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py index ea0873fb..7eee47a5 100644 --- a/cloudinit/sources/helpers/vmware/imc/config.py +++ b/cloudinit/sources/helpers/vmware/imc/config.py @@ -1,122 +1,90 @@ -from cloudinit.sources.helpers.vmware.imc.nic import Nic +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2015 VMware Inc. +# +# Author: Sankar Tanguturi +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +from .nic import Nic class Config: + """ + Stores the Contents specified in the Customization + Specification file. + """ + DNS = 'DNS|NAMESERVER|' SUFFIX = 'DNS|SUFFIX|' PASS = 'PASSWORD|-PASS' TIMEZONE = 'DATETIME|TIMEZONE' UTC = 'DATETIME|UTC' HOSTNAME = 'NETWORK|HOSTNAME' - OMAINNAME = 'NETWORK|DOMAINNAME' + DOMAINNAME = 'NETWORK|DOMAINNAME' def __init__(self, configFile): self._configFile = configFile - # Retrieves hostname. - # - # Args: - # None - # Results: - # string: hostname - # Throws: - # None @property - def hostName(self): + def host_name(self): + """Return the hostname.""" return self._configFile.get(Config.HOSTNAME, None) - # Retrieves domainName. 
- # - # Args: - # None - # Results: - # string: domainName - # Throws: - # None @property - def domainName(self): + def domain_name(self): + """Return the domain name.""" return self._configFile.get(Config.DOMAINNAME, None) - # Retrieves timezone. - # - # Args: - # None - # Results: - # string: timezone - # Throws: - # None @property - def timeZone(self): + def timezone(self): + """Return the timezone.""" return self._configFile.get(Config.TIMEZONE, None) - # Retrieves whether to set time to UTC or Local. - # - # Args: - # None - # Results: - # boolean: True for yes/YES, True for no/NO, otherwise - None - # Throws: - # None @property def utc(self): + """Retrieves whether to set time to UTC or Local.""" return self._configFile.get(Config.UTC, None) - # Retrieves root password to be set. - # - # Args: - # None - # Results: - # string: base64-encoded root password or None - # Throws: - # None @property - def adminPassword(self): + def admin_password(self): + """Return the root password to be set.""" return self._configFile.get(Config.PASS, None) - # Retrieves DNS Servers. - # - # Args: - # None - # Results: - # integer: count or 0 - # Throws: - # None @property - def nameServers(self): + def name_servers(self): + """Return the list of DNS servers.""" res = [] - for i in range(1, self._configFile.getCnt(Config.DNS) + 1): + for i in range(1, self._configFile.get_count(Config.DNS) + 1): key = Config.DNS + str(i) res.append(self._configFile[key]) return res - # Retrieves DNS Suffixes. - # - # Args: - # None - # Results: - # integer: count or 0 - # Throws: - # None @property - def dnsSuffixes(self): + def dns_suffixes(self): + """Return the list of DNS Suffixes.""" res = [] - for i in range(1, self._configFile.getCnt(Config.SUFFIX) + 1): + for i in range(1, self._configFile.get_count(Config.SUFFIX) + 1): key = Config.SUFFIX + str(i) res.append(self._configFile[key]) return res - # Retrieves NICs. - # - # Args: - # None - # Results: - # integer: count - # Throws: - # None @property def nics(self): + """Return the list of associated NICs.""" res = [] nics = self._configFile['NIC-CONFIG|NICS'] for nic in nics.split(','): diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py index 3f9938da..e08a2a9a 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_file.py +++ b/cloudinit/sources/helpers/vmware/imc/config_file.py @@ -1,221 +1,151 @@ -import logging -import re - -from cloudinit.sources.helpers.vmware.imc.config_source import ConfigSource - -logger = logging.getLogger(__name__) - - -class ConfigFile(ConfigSource): - def __init__(self): - self._configData = {} - - def __getitem__(self, key): - return self._configData[key] - - def get(self, key, default=None): - return self._configData.get(key, default) - - # Removes all the properties. - # - # Args: - # None - # Results: - # None - # Throws: - # None - def clear(self): - self._configData.clear() - - # Inserts k/v pair. - # - # Does not do any key/cross-key validation. 
- # - # Args: - # key: string: key - # val: string: value - # Results: - # None - # Throws: - # None - def _insertKey(self, key, val): - # cleaning up on all "input" path - - # remove end char \n (chomp) - key = key.strip() - val = val.strip() - - if key.startswith('-') or '|-' in key: - canLog = 0 - else: - canLog = 1 - - # "sensitive" settings shall not be logged - if canLog: - logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val)) - else: - logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key) - - self._configData[key] = val - - # Determines properties count. - # - # Args: - # None - # Results: - # integer: properties count - # Throws: - # None - def size(self): - return len(self._configData) - - # Parses properties from a .cfg file content. - # - # Any previously available properties will be removed. - # - # Sensitive data will not be logged in case key starts from '-'. - # - # Args: - # content: string: e.g. content of config/cust.cfg - # Results: - # None - # Throws: - # None - def loadConfigContent(self, content): - self.clear() - - # remove end char \n (chomp) - for line in content.split('\n'): - # TODO validate against allowed characters (not done in Perl) - - # spaces at the end are not allowed, things like passwords must be - # at least base64-encoded - line = line.strip() - - # "sensitive" settings shall not be logged - if line.startswith('-'): - canLog = 0 - else: - canLog = 1 - - if canLog: - logger.debug("Processing line: '%s'" % line) - else: - logger.debug("Processing line: '***********************'") - - if not line: - logger.debug("Empty line. Ignored.") - continue - - if line.startswith('#'): - logger.debug("Comment found. Line ignored.") - continue - - matchObj = re.match(r'\[(.+)\]', line) - if matchObj: - category = matchObj.group(1) - logger.debug("FOUND CATEGORY = '%s'" % category) - else: - # POSIX.2 regex doesn't support non-greedy like in (.+?)=(.*) - # key value pair (non-eager '=' for base64) - matchObj = re.match(r'([^=]+)=(.*)', line) - if matchObj: - # cleaning up on all "input" paths - key = category + "|" + matchObj.group(1).strip() - val = matchObj.group(2).strip() - - self._insertKey(key, val) - else: - # TODO document - raise Exception("Unrecognizable line: '%s'" % line) - - self.validate() - - # Parses properties from a .cfg file - # - # Any previously available properties will be removed. - # - # Sensitive data will not be logged in case key starts from '-'. - # - # Args: - # filename: string: full path to a .cfg file - # Results: - # None - # Throws: - # None - def loadConfigFile(self, filename): - logger.info("Opening file name %s." % filename) - # TODO what throws? - with open(filename, "r") as myfile: - self.loadConfigContent(myfile.read()) - - # Determines whether a property with a given key exists. - # - # Args: - # key: string: key - # Results: - # boolean: True if such property exists, otherwise - False. - # Throws: - # None - def hasKey(self, key): - return key in self._configData - - # Determines whether a value for a property must be kept. - # - # If the property is missing, it's treated as it should be not changed by - # the engine. - # - # Args: - # key: string: key - # Results: - # boolean: True if property must be kept, otherwise - False. - # Throws: - # None - def keepCurrentValue(self, key): - # helps to distinguish from "empty" value which is used to indicate - # "removal" - return not self.hasKey(key) - - # Determines whether a value for a property must be removed. 
- # - # If the property is empty, it's treated as it should be removed by the - # engine. - # - # Args: - # key: string: key - # Results: - # boolean: True if property must be removed, otherwise - False. - # Throws: - # None - def removeCurrentValue(self, key): - # helps to distinguish from "missing" value which is used to indicate - # "keeping unchanged" - if self.hasKey(key): - return not bool(self._configData[key]) - else: - return False - - # TODO - def getCnt(self, prefix): - res = 0 - for key in self._configData.keys(): - if key.startswith(prefix): - res += 1 - - return res - - # TODO - # TODO pass base64 - # Throws: - # Dies in case timezone is present but empty. - # Dies in case password is present but empty. - # Dies in case hostname is present but empty or greater than 63 chars. - # Dies in case UTC is present, but is not yes/YES or no/NO. - # Dies in case NICS is not present. - def validate(self): - # TODO must log all the errors - keyValidators = {'NIC1|IPv6GATEWAY|': None} - crossValidators = {} - - for key in self._configData.keys(): - pass +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2015 VMware Inc. +# +# Author: Sankar Tanguturi +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import logging + +try: + import configparser +except ImportError: + import ConfigParser as configparser + +from .config_source import ConfigSource + +logger = logging.getLogger(__name__) + + +class ConfigFile(ConfigSource, dict): + """ConfigFile module to load the content from a specified source.""" + + def __init__(self): + pass + + def _insertKey(self, key, val): + """ + Inserts a Key Value pair. + + Keyword arguments: + key -- The key to insert + val -- The value to insert for the key + + """ + key = key.strip() + val = val.strip() + + if key.startswith('-') or '|-' in key: + canLog = 0 + else: + canLog = 1 + + # "sensitive" settings shall not be logged + if canLog: + logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val)) + else: + logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key) + + self[key] = val + + def size(self): + """Return the number of properties present.""" + return len(self) + + def loadConfigFile(self, filename): + """ + Parses properties from the specified config file. + + Any previously available properties will be removed. + Sensitive data will not be logged in case the key starts + from '-'. + + Keyword arguments: + filename - The full path to the config file. + """ + logger.info('Parsing the config file %s.' 
% filename)
+        config = configparser.ConfigParser()
+        config.optionxform = str
+        config.read(filename)
+
+        self.clear()
+
+        for category in config.sections():
+            logger.debug("FOUND CATEGORY = '%s'" % category)
+
+            for (key, value) in config.items(category):
+                # "sensitive" settings shall not be logged
+                if key.startswith('-'):
+                    canLog = 0
+                else:
+                    canLog = 1
+
+                if canLog:
+                    logger.debug("Processing key, value: '%s':'%s'" %
+                                 (key, value))
+                else:
+                    logger.debug("Processing key, value : "
+                                 "'*********************'")
+
+                self._insertKey(category + '|' + key, value)
+
+    def keep_current_value(self, key):
+        """
+        Determines whether a value for a property must be kept.
+
+        If the property is missing, it is treated as though it should
+        not be changed by the engine.
+
+        Keyword arguments:
+        key -- The key to search for.
+        """
+        # helps to distinguish from "empty" value which is used to indicate
+        # "removal"
+        return key not in self
+
+    def remove_current_value(self, key):
+        """
+        Determines whether a value for the property must be removed.
+
+        If the specified key is empty, it is treated as though it should
+        be removed by the engine.
+
+        Return true if the value can be removed, false otherwise.
+
+        Keyword arguments:
+        key -- The key to search for.
+        """
+        # helps to distinguish from "missing" value which is used to indicate
+        # "keeping unchanged"
+        if key in self:
+            return not bool(self[key])
+        else:
+            return False
+
+    def get_count(self, prefix):
+        """
+        Return the total number of keys that start with the
+        specified prefix.
+
+        Keyword arguments:
+        prefix -- prefix of the key
+        """
+        res = 0
+        for key in self.keys():
+            if key.startswith(prefix):
+                res += 1
+
+        return res
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
index 7f76ac8b..7266b699 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -1,5 +1,25 @@
-from cloudinit.sources.helpers.vmware.imc.config_source import ConfigSource
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see .
+
+from .config_source import ConfigSource

 class ConfigNamespace(ConfigSource):
+    """Specifies the Config Namespace."""
     pass
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
index fad3a389..a367e476 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -1,2 +1,23 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + class ConfigSource: + """Specifies a source for the Config Content.""" pass diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py index 66b4fad7..28544e4f 100644 --- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py +++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py @@ -1,29 +1,45 @@ -# from enum import Enum - - -# The IPv4 configuration mode which directly represents the user's goal. -# -# This mode effectively acts as a contract of the inguest customization engine. -# It must be set based on what the user has requested via VMODL/generators API -# and should not be changed by those layers. It's up to the in-guest engine to -# interpret and materialize the user's request. -# -# Also defined in linuxconfiggenerator.h. -class Ipv4Mode: - # The legacy mode which only allows dhcp/static based on whether IPv4 - # addresses list is empty or not - IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE' - # IPv4 must use static address. Reserved for future use - IPV4_MODE_STATIC = 'STATIC' - # IPv4 must use DHCPv4. Reserved for future use - IPV4_MODE_DHCP = 'DHCP' - # IPv4 must be disabled - IPV4_MODE_DISABLED = 'DISABLED' - # IPv4 settings should be left untouched. Reserved for future use - IPV4_MODE_AS_IS = 'AS_IS' - - # def __eq__(self, other): - # return self.name == other.name and self.value == other.value - # - # def __ne__(self, other): - # return not self.__eq__(other) +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2015 VMware Inc. +# +# Author: Sankar Tanguturi +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +class Ipv4Mode: + """ + The IPv4 configuration mode which directly represents the user's goal. + + This mode effectively acts as a contract of the in-guest customization + engine. It must be set based on what the user has requested and should + not be changed by those layers. It's up to the in-guest engine to + interpret and materialize the user's request. + """ + + # The legacy mode which only allows dhcp/static based on whether IPv4 + # addresses list is empty or not + IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE' + + # IPv4 must use static address. Reserved for future use + IPV4_MODE_STATIC = 'STATIC' + + # IPv4 must use DHCPv4. Reserved for future use + IPV4_MODE_DHCP = 'DHCP' + + # IPv4 must be disabled + IPV4_MODE_DISABLED = 'DISABLED' + + # IPv4 settings should be left untouched. 
Reserved for future use + IPV4_MODE_AS_IS = 'AS_IS' diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py index b90a5640..bb45a9e6 100644 --- a/cloudinit/sources/helpers/vmware/imc/nic.py +++ b/cloudinit/sources/helpers/vmware/imc/nic.py @@ -1,107 +1,147 @@ -from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProto - - -class Nic: - def __init__(self, name, configFile): - self._name = name - self._configFile = configFile - - def _get(self, what): - return self._configFile.get(self.name + what, None) - - def _getCnt(self, prefix): - return self._configFile.getCnt(self.name + prefix) - - @property - def name(self): - return self._name - - @property - def mac(self): - return self._get('|MACADDR').lower() - - @property - def bootProto(self): - return self._get('|BOOTPROTO').lower() - - @property - def ipv4(self): - # TODO implement NONE - if self.bootProto == BootProto.STATIC: - return StaticIpv4Conf(self) - - return DhcpIpv4Conf(self) - - @property - def ipv6(self): - # TODO implement NONE - cnt = self._getCnt("|IPv6ADDR|") - - if cnt != 0: - return StaticIpv6Conf(self) - - return DhcpIpv6Conf(self) - - -class DhcpIpv4Conf: - def __init__(self, nic): - self._nic = nic - - -class StaticIpv4Addr: - def __init__(self, nic): - self._nic = nic - - @property - def ip(self): - return self._nic._get('|IPADDR') - - @property - def netmask(self): - return self._nic._get('|NETMASK') - - @property - def gateway(self): - return self._nic._get('|GATEWAY') - - -class StaticIpv4Conf(DhcpIpv4Conf): - @property - def addrs(self): - return [StaticIpv4Addr(self._nic)] - - -class DhcpIpv6Conf: - def __init__(self, nic): - self._nic = nic - - -class StaticIpv6Addr: - def __init__(self, nic, index): - self._nic = nic - self._index = index - - @property - def ip(self): - return self._nic._get("|IPv6ADDR|" + str(self._index)) - - @property - def prefix(self): - return self._nic._get("|IPv6NETMASK|" + str(self._index)) - - @property - def gateway(self): - return self._nic._get("|IPv6GATEWAY|" + str(self._index)) - - -class StaticIpv6Conf(DhcpIpv6Conf): - @property - def addrs(self): - cnt = self._nic._getCnt("|IPv6ADDR|") - - res = [] - - for i in range(1, cnt + 1): - res.append(StaticIpv6Addr(self._nic, i)) - - return res +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2015 VMware Inc. +# +# Author: Sankar Tanguturi +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+
+from .boot_proto import BootProto
+
+
+class Nic:
+    """
+    Holds the information about each NIC specified
+    in the customization specification file
+    """
+
+    def __init__(self, name, configFile):
+        self._name = name
+        self._configFile = configFile
+
+    def _get(self, what):
+        return self._configFile.get(self.name + what, None)
+
+    def _get_count(self, prefix):
+        return self._configFile.get_count(self.name + prefix)
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def mac(self):
+        return self._get('|MACADDR').lower()
+
+    @property
+    def bootProto(self):
+        return self._get('|BOOTPROTO').lower()
+
+    @property
+    def ipv4(self):
+        """
+        Retrieves the DHCP or Static IPv6 configuration
+        based on the BOOTPROTO property associated with the NIC
+        """
+        if self.bootProto == BootProto.STATIC:
+            return StaticIpv4Conf(self)
+
+        return DhcpIpv4Conf(self)
+
+    @property
+    def ipv6(self):
+        cnt = self._get_count("|IPv6ADDR|")
+
+        if cnt != 0:
+            return StaticIpv6Conf(self)
+
+        return DhcpIpv6Conf(self)
+
+
+class DhcpIpv4Conf:
+    """DHCP Configuration Setting."""
+
+    def __init__(self, nic):
+        self._nic = nic
+
+
+class StaticIpv4Addr:
+    """Static IPV4 Setting."""
+
+    def __init__(self, nic):
+        self._nic = nic
+
+    @property
+    def ip(self):
+        return self._nic._get('|IPADDR')
+
+    @property
+    def netmask(self):
+        return self._nic._get('|NETMASK')
+
+    @property
+    def gateway(self):
+        return self._nic._get('|GATEWAY')
+
+
+class StaticIpv4Conf(DhcpIpv4Conf):
+    """Static IPV4 Configuration."""
+
+    @property
+    def addrs(self):
+        """Return the list of associated IPv4 addresses."""
+        return [StaticIpv4Addr(self._nic)]
+
+
+class DhcpIpv6Conf:
+    """DHCP IPV6 Configuration."""
+
+    def __init__(self, nic):
+        self._nic = nic
+
+
+class StaticIpv6Addr:
+    """Static IPV6 Address."""
+
+    def __init__(self, nic, index):
+        self._nic = nic
+        self._index = index
+
+    @property
+    def ip(self):
+        return self._nic._get("|IPv6ADDR|" + str(self._index))
+
+    @property
+    def prefix(self):
+        return self._nic._get("|IPv6NETMASK|" + str(self._index))
+
+    @property
+    def gateway(self):
+        return self._nic._get("|IPv6GATEWAY|" + str(self._index))
+
+
+class StaticIpv6Conf(DhcpIpv6Conf):
+    """Static IPV6 Configuration."""
+
+    @property
+    def addrs(self):
+        """Return the list Associated IPV6 addresses."""
+        cnt = self._nic._get_count("|IPv6ADDR|")
+
+        res = []
+
+        for i in range(1, cnt + 1):
+            res.append(StaticIpv6Addr(self._nic, i))
+
+        return res
--
cgit v1.2.3


From 415c45a2b9b66603e672e8ea54cee8f40a19abd1 Mon Sep 17 00:00:00 2001
From: Sankar Tanguturi
Date: Tue, 19 Jan 2016 18:24:54 -0800
Subject: Fixed all the review comments from Daniel.

Added a new file, nic_base.py, which will be used as a base class for all
NIC-related configuration.
Modified some code in nic.py.
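The nic_base.py contract is meant to be consumed by subclassing: an
implementation overrides each property to return a parsed setting, and
anything left unimplemented falls through to the base class, which raises
NotImplementedError. A minimal sketch of such an implementation (the class
name and the dict-backed storage here are illustrative only, not part of
the patch):

    from cloudinit.sources.helpers.vmware.imc.nic_base import NicBase

    class DictBackedNic(NicBase):
        """Illustrative NicBase implementation backed by a plain dict."""

        def __init__(self, settings):
            self._settings = settings

        @property
        def mac(self):
            # nic.py normalizes MACADDR to lower case; do the same here
            return self._settings['MACADDR'].lower()

        @property
        def primary(self):
            return self._settings.get('PRIMARY', '').lower() in ('yes', 'true')

    nic = DictBackedNic({'MACADDR': '00:50:56:11:22:33', 'PRIMARY': 'yes'})
    assert nic.mac == '00:50:56:11:22:33'
    assert nic.primary
    # Properties that were not overridden, e.g. nic.bootProto, raise
    # NotImplementedError('BOOTPROTO') as defined in NicBase.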
--- cloudinit/sources/helpers/vmware/imc/boot_proto.py | 2 +- cloudinit/sources/helpers/vmware/imc/config.py | 6 +- .../sources/helpers/vmware/imc/config_file.py | 40 ++---- cloudinit/sources/helpers/vmware/imc/ipv4_mode.py | 2 +- cloudinit/sources/helpers/vmware/imc/nic.py | 118 +++++++--------- cloudinit/sources/helpers/vmware/imc/nic_base.py | 154 +++++++++++++++++++++ 6 files changed, 222 insertions(+), 100 deletions(-) create mode 100644 cloudinit/sources/helpers/vmware/imc/nic_base.py (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py index abfffd75..faba5887 100644 --- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py +++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py @@ -18,7 +18,7 @@ # along with this program. If not, see . -class BootProto: +class BootProtoEnum: """Specifies the NIC Boot Settings.""" DHCP = 'dhcp' diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py index 7eee47a5..aebc12a0 100644 --- a/cloudinit/sources/helpers/vmware/imc/config.py +++ b/cloudinit/sources/helpers/vmware/imc/config.py @@ -66,7 +66,8 @@ class Config: def name_servers(self): """Return the list of DNS servers.""" res = [] - for i in range(1, self._configFile.get_count(Config.DNS) + 1): + cnt = self._configFile.get_count_with_prefix(Config.DNS) + for i in range(1, cnt + 1): key = Config.DNS + str(i) res.append(self._configFile[key]) @@ -76,7 +77,8 @@ class Config: def dns_suffixes(self): """Return the list of DNS Suffixes.""" res = [] - for i in range(1, self._configFile.get_count(Config.SUFFIX) + 1): + cnt = self._configFile.get_count_with_prefix(Config.SUFFIX) + for i in range(1, cnt + 1): key = Config.SUFFIX + str(i) res.append(self._configFile[key]) diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py index e08a2a9a..7c47d14c 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_file.py +++ b/cloudinit/sources/helpers/vmware/imc/config_file.py @@ -32,7 +32,8 @@ logger = logging.getLogger(__name__) class ConfigFile(ConfigSource, dict): """ConfigFile module to load the content from a specified source.""" - def __init__(self): + def __init__(self, filename): + self._loadConfigFile(filename) pass def _insertKey(self, key, val): @@ -48,9 +49,9 @@ class ConfigFile(ConfigSource, dict): val = val.strip() if key.startswith('-') or '|-' in key: - canLog = 0 + canLog = False else: - canLog = 1 + canLog = True # "sensitive" settings shall not be logged if canLog: @@ -64,7 +65,7 @@ class ConfigFile(ConfigSource, dict): """Return the number of properties present.""" return len(self) - def loadConfigFile(self, filename): + def _loadConfigFile(self, filename): """ Parses properties from the specified config file. @@ -87,22 +88,9 @@ class ConfigFile(ConfigSource, dict): logger.debug("FOUND CATEGORY = '%s'" % category) for (key, value) in config.items(category): - # "sensitive" settings shall not be logged - if key.startswith('-'): - canLog = 0 - else: - canLog = 1 - - if canLog: - logger.debug("Processing key, value: '%s':'%s'" % - (key, value)) - else: - logger.debug("Processing key, value : " - "'*********************'") - self._insertKey(category + '|' + key, value) - def keep_current_value(self, key): + def should_keep_current_value(self, key): """ Determines whether a value for a property must be kept. 
@@ -114,9 +102,9 @@ class ConfigFile(ConfigSource, dict): """ # helps to distinguish from "empty" value which is used to indicate # "removal" - return not key in self + return key not in self - def remove_current_value(self, key): + def should_remove_current_value(self, key): """ Determines whether a value for the property must be removed. @@ -135,17 +123,11 @@ class ConfigFile(ConfigSource, dict): else: return False - def get_count(self, prefix): + def get_count_with_prefix(self, prefix): """ - Return the total number of keys that start with the - specified prefix. + Return the total count of keys that start with the specified prefix. Keyword arguments: prefix -- prefix of the key """ - res = 0 - for key in self.keys(): - if key.startswith(prefix): - res += 1 - - return res + return len([key for key in self if key.startswith(prefix)]) diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py index 28544e4f..33f88726 100644 --- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py +++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py @@ -18,7 +18,7 @@ # along with this program. If not, see . -class Ipv4Mode: +class Ipv4ModeEnum: """ The IPv4 configuration mode which directly represents the user's goal. diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py index bb45a9e6..a7594874 100644 --- a/cloudinit/sources/helpers/vmware/imc/nic.py +++ b/cloudinit/sources/helpers/vmware/imc/nic.py @@ -17,10 +17,11 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -from .boot_proto import BootProto +from .boot_proto import BootProtoEnum +from .nic_base import NicBase, StaticIpv4Base, StaticIpv6Base -class Nic: +class Nic(NicBase): """ Holds the information about each NIC specified in the customization specification file @@ -31,10 +32,10 @@ class Nic: self._configFile = configFile def _get(self, what): - return self._configFile.get(self.name + what, None) + return self._configFile.get(self.name + '|' + what, None) - def _get_count(self, prefix): - return self._configFile.get_count(self.name + prefix) + def _get_count_with_prefix(self, prefix): + return self._configFile.get_count_with_prefix(self.name + prefix) @property def name(self): @@ -42,41 +43,52 @@ class Nic: @property def mac(self): - return self._get('|MACADDR').lower() + return self._get('MACADDR').lower() @property - def bootProto(self): - return self._get('|BOOTPROTO').lower() + def primary(self): + value = self._get('PRIMARY').lower() + return value == 'yes' or value == 'true' @property - def ipv4(self): - """ - Retrieves the DHCP or Static IPv6 configuration - based on the BOOTPROTO property associated with the NIC - """ - if self.bootProto == BootProto.STATIC: - return StaticIpv4Conf(self) + def onboot(self): + value = self._get('ONBOOT').lower() + return value == 'yes' or value == 'true' - return DhcpIpv4Conf(self) + @property + def bootProto(self): + return self._get('BOOTPROTO').lower() @property - def ipv6(self): - cnt = self._get_count("|IPv6ADDR|") + def ipv4_mode(self): + return self._get('IPv4_MODE').lower() - if cnt != 0: - return StaticIpv6Conf(self) + @property + def staticIpv4(self): + """ + Checks the BOOTPROTO property and returns StaticIPv4Addr + configuration object if STATIC configuration is set. 
+ """ + if self.bootProto == BootProtoEnum.STATIC: + return [StaticIpv4Addr(self)] + else: + return None - return DhcpIpv6Conf(self) + @property + def staticIpv6(self): + cnt = self._get_count_with_prefix('|IPv6ADDR|') + if not cnt: + return None -class DhcpIpv4Conf: - """DHCP Configuration Setting.""" + result = [] + for index in range(1, cnt + 1): + result.append(StaticIpv6Addr(self, index)) - def __init__(self, nic): - self._nic = nic + return result -class StaticIpv4Addr: +class StaticIpv4Addr(StaticIpv4Base): """Static IPV4 Setting.""" def __init__(self, nic): @@ -84,34 +96,22 @@ class StaticIpv4Addr: @property def ip(self): - return self._nic._get('|IPADDR') + return self._nic._get('IPADDR') @property def netmask(self): - return self._nic._get('|NETMASK') + return self._nic._get('NETMASK') @property - def gateway(self): - return self._nic._get('|GATEWAY') + def gateways(self): + value = self._nic._get('GATEWAY') + if value: + return [x.strip() for x in value.split(',')] + else: + return None -class StaticIpv4Conf(DhcpIpv4Conf): - """Static IPV4 Configuration.""" - - @property - def addrs(self): - """Return the list of associated IPv4 addresses.""" - return [StaticIpv4Addr(self._nic)] - - -class DhcpIpv6Conf: - """DHCP IPV6 Configuration.""" - - def __init__(self, nic): - self._nic = nic - - -class StaticIpv6Addr: +class StaticIpv6Addr(StaticIpv6Base): """Static IPV6 Address.""" def __init__(self, nic, index): @@ -120,28 +120,12 @@ class StaticIpv6Addr: @property def ip(self): - return self._nic._get("|IPv6ADDR|" + str(self._index)) + return self._nic._get('IPv6ADDR|' + str(self._index)) @property - def prefix(self): - return self._nic._get("|IPv6NETMASK|" + str(self._index)) + def netmask(self): + return self._nic._get('IPv6NETMASK|' + str(self._index)) @property def gateway(self): - return self._nic._get("|IPv6GATEWAY|" + str(self._index)) - - -class StaticIpv6Conf(DhcpIpv6Conf): - """Static IPV6 Configuration.""" - - @property - def addrs(self): - """Return the list Associated IPV6 addresses.""" - cnt = self._nic._get_count("|IPv6ADDR|") - - res = [] - - for i in range(1, cnt + 1): - res.append(StaticIpv6Addr(self._nic, i)) - - return res + return self._nic._get('IPv6GATEWAY|' + str(self._index)) diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py new file mode 100644 index 00000000..030ba311 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py @@ -0,0 +1,154 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2015 VMware Inc. +# +# Author: Sankar Tanguturi +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +class NicBase: + """ + Define what are expected of each nic. + The following properties should be provided in an implementation class. 
+ """ + + @property + def mac(self): + """ + Retrieves the mac address of the nic + @return (str) : the MACADDR setting + """ + raise NotImplementedError('MACADDR') + + @property + def primary(self): + """ + Retrieves whether the nic is the primary nic + Indicates whether NIC will be used to define the default gateway. + If none of the NICs is configured to be primary, default gateway won't + be set. + @return (bool): the PRIMARY setting + """ + raise NotImplementedError('PRIMARY') + + @property + def onboot(self): + """ + Retrieves whether the nic should be up at the boot time + @return (bool) : the ONBOOT setting + """ + raise NotImplementedError('ONBOOT') + + @property + def bootProto(self): + """ + Retrieves the boot protocol of the nic + @return (str): the BOOTPROTO setting, valid values: dhcp and static. + """ + raise NotImplementedError('BOOTPROTO') + + @property + def ipv4_mode(self): + """ + Retrieves the IPv4_MODE + @return (str): the IPv4_MODE setting, valid values: + backwards_compatible, static, dhcp, disabled, as_is + """ + raise NotImplementedError('IPv4_MODE') + + @property + def staticIpv4(self): + """ + Retrieves the static IPv4 configuration of the nic + @return (StaticIpv4Base list): the static ipv4 setting + """ + raise NotImplementedError('Static IPv4') + + @property + def staticIpv6(self): + """ + Retrieves the IPv6 configuration of the nic + @return (StaticIpv6Base list): the static ipv6 setting + """ + raise NotImplementedError('Static Ipv6') + + def validate(self): + """ + Validate the object + For example, the staticIpv4 property is required and should not be + empty when ipv4Mode is STATIC + """ + raise NotImplementedError('Check constraints on properties') + + +class StaticIpv4Base: + """ + Define what are expected of a static IPv4 setting + The following properties should be provided in an implementation class. + """ + + @property + def ip(self): + """ + Retrieves the Ipv4 address + @return (str): the IPADDR setting + """ + raise NotImplementedError('Ipv4 Address') + + @property + def netmask(self): + """ + Retrieves the Ipv4 NETMASK setting + @return (str): the NETMASK setting + """ + raise NotImplementedError('Ipv4 NETMASK') + + @property + def gateways(self): + """ + Retrieves the gateways on this Ipv4 subnet + @return (str list): the GATEWAY setting + """ + raise NotImplementedError('Ipv4 GATEWAY') + + +class StaticIpv6Base: + """Define what are expected of a static IPv6 setting + The following properties should be provided in an implementation class. + """ + + @property + def ip(self): + """ + Retrieves the Ipv6 address + @return (str): the IPv6ADDR setting + """ + raise NotImplementedError('Ipv6 Address') + + @property + def netmask(self): + """ + Retrieves the Ipv6 NETMASK setting + @return (str): the IPv6NETMASK setting + """ + raise NotImplementedError('Ipv6 NETMASK') + + @property + def gateway(self): + """ + Retrieves the Ipv6 GATEWAY setting + @return (str): the IPv6GATEWAY setting + """ + raise NotImplementedError('Ipv6 GATEWAY') -- cgit v1.2.3 From 75ba44d2730b89f13b2069961ea8de63f65ea780 Mon Sep 17 00:00:00 2001 From: Robert Jennings Date: Thu, 4 Feb 2016 15:52:08 -0600 Subject: SmartOS: Add support for Joyent LX-Brand Zones (LP: #1540965) LX-brand zones on Joyent's SmartOS use a different metadata source (socket file) than the KVM-based SmartOS virtualization (serial port). This patch adds support for recognizing the different flavors of virtualization on SmartOS and setting up a metadata source file object. 
After the file object is created, the rest of the code for the datasource
works the same regardless of which metadata transport is in use.

LP: #1540965
---
 cloudinit/sources/DataSourceSmartOS.py          | 257 ++++++++++++++----------
 doc/examples/cloud-config-datasources.txt       |   7 +
 tests/unittests/test_datasource/test_smartos.py |  85 +++++---
 3 files changed, 216 insertions(+), 133 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index c9b497df..7453379a 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -20,10 +20,13 @@
 #    Datasource for provisioning on SmartOS. This works on Joyent
 #        and public/private Clouds using SmartOS.
 #
-#    SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests.
+#    SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
 #        The meta-data is transmitted via key/value pairs made by
 #        requests on the console. For example, to get the hostname, you
 #        would send "GET hostname" on /dev/ttyS1.
+#        For Linux Guests running in LX-Brand Zones on SmartOS hosts
+#        a socket (/native/.zonecontrol/metadata.sock) is used instead
+#        of a serial console.
 #
 #   Certain behavior is defined by the DataDictionary
 #       http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
@@ -34,6 +37,8 @@ import contextlib
 import os
 import random
 import re
+import socket
+import stat

 import serial

@@ -46,6 +51,7 @@ LOG = logging.getLogger(__name__)

 SMARTOS_ATTRIB_MAP = {
     # Cloud-init Key : (SmartOS Key, Strip line endings)
+    'instance-id': ('sdc:uuid', True),
     'local-hostname': ('hostname', True),
     'public-keys': ('root_authorized_keys', True),
     'user-script': ('user-script', False),
@@ -76,6 +82,7 @@ DS_CFG_PATH = ['datasource', DS_NAME]
 #
 BUILTIN_DS_CONFIG = {
     'serial_device': '/dev/ttyS1',
+    'metadata_sockfile': '/native/.zonecontrol/metadata.sock',
     'seed_timeout': 60,
     'no_base64_decode': ['root_authorized_keys',
                          'motd_sys_info',
@@ -83,6 +90,7 @@ BUILTIN_DS_CONFIG = {
                          'user-data',
                          'user-script',
                          'sdc:datacenter_name',
+                         'sdc:uuid',
                         ],
     'base64_keys': [],
     'base64_all': False,
@@ -150,17 +158,27 @@ class DataSourceSmartOS(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.is_smartdc = None
-
         self.ds_cfg = util.mergemanydict([
             self.ds_cfg,
             util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
             BUILTIN_DS_CONFIG])

         self.metadata = {}
-        self.cfg = BUILTIN_CLOUD_CONFIG
-
-        self.seed = self.ds_cfg.get("serial_device")
-        self.seed_timeout = self.ds_cfg.get("serial_timeout")
+        # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
+        # report 'BrandZ virtual linux' as the kernel version
+        if os.uname()[3].lower() == 'brandz virtual linux':
+            LOG.debug("Host is SmartOS, guest in Zone")
+            self.is_smartdc = True
+            self.smartos_type = 'lx-brand'
+            self.cfg = {}
+            self.seed = self.ds_cfg.get("metadata_sockfile")
+        else:
+            self.is_smartdc = True
+            self.smartos_type = 'kvm'
+            self.seed = self.ds_cfg.get("serial_device")
+            self.cfg = BUILTIN_CLOUD_CONFIG
+            self.seed_timeout = self.ds_cfg.get("serial_timeout")
         self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
         self.b64_keys = self.ds_cfg.get('base64_keys')
         self.b64_all = self.ds_cfg.get('base64_all')
@@ -170,12 +188,49 @@ class DataSourceSmartOS(sources.DataSource):
         root = sources.DataSource.__str__(self)
         return "%s [seed=%s]" % (root, self.seed)

+    def _get_seed_file_object(self):
+        if not self.seed:
+            raise AttributeError("seed device is not set")
+
+        if self.smartos_type == 'lx-brand':
+            if not
stat.S_ISSOCK(os.stat(self.seed).st_mode): + LOG.debug("Seed %s is not a socket", self.seed) + return None + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + sock.connect(self.seed) + return sock.makefile('rwb') + else: + if not stat.S_ISCHR(os.stat(self.seed).st_mode): + LOG.debug("Seed %s is not a character device") + return None + ser = serial.Serial(self.seed, timeout=self.seed_timeout) + if not ser.isOpen(): + raise SystemError("Unable to open %s" % self.seed) + return ser + return None + + def _set_provisioned(self): + '''Mark the instance provisioning state as successful. + + When run in a zone, the host OS will look for /var/svc/provisioning + to be renamed as /var/svc/provision_success. This should be done + after meta-data is successfully retrieved and from this point + the host considers the provision of the zone to be a success and + keeps the zone running. + ''' + + LOG.debug('Instance provisioning state set as successful') + svc_path = '/var/svc' + if os.path.exists('/'.join([svc_path, 'provisioning'])): + os.rename('/'.join([svc_path, 'provisioning']), + '/'.join([svc_path, 'provision_success'])) + def get_data(self): md = {} ud = "" if not device_exists(self.seed): - LOG.debug("No serial device '%s' found for SmartOS datasource", + LOG.debug("No metadata device '%s' found for SmartOS datasource", self.seed) return False @@ -185,29 +240,36 @@ class DataSourceSmartOS(sources.DataSource): LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)") return False - dmi_info = dmi_data() - if dmi_info is False: - LOG.debug("No dmidata utility found") - return False - - system_uuid, system_type = tuple(dmi_info) - if 'smartdc' not in system_type.lower(): - LOG.debug("Host is not on SmartOS. system_type=%s", system_type) + # SDC KVM instances will provide dmi data, LX-brand does not + if self.smartos_type == 'kvm': + dmi_info = dmi_data() + if dmi_info is False: + LOG.debug("No dmidata utility found") + return False + + system_type = dmi_info + if 'smartdc' not in system_type.lower(): + LOG.debug("Host is not on SmartOS. 
system_type=%s", + system_type) + return False + LOG.debug("Host is SmartOS, guest in KVM") + + seed_obj = self._get_seed_file_object() + if seed_obj is None: + LOG.debug('Seed file object not found.') return False - self.is_smartdc = True - md['instance-id'] = system_uuid + with contextlib.closing(seed_obj) as seed: + b64_keys = self.query('base64_keys', seed, strip=True, b64=False) + if b64_keys is not None: + self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] - b64_keys = self.query('base64_keys', strip=True, b64=False) - if b64_keys is not None: - self.b64_keys = [k.strip() for k in str(b64_keys).split(',')] + b64_all = self.query('base64_all', seed, strip=True, b64=False) + if b64_all is not None: + self.b64_all = util.is_true(b64_all) - b64_all = self.query('base64_all', strip=True, b64=False) - if b64_all is not None: - self.b64_all = util.is_true(b64_all) - - for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): - smartos_noun, strip = attribute - md[ci_noun] = self.query(smartos_noun, strip=strip) + for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items(): + smartos_noun, strip = attribute + md[ci_noun] = self.query(smartos_noun, seed, strip=strip) # @datadictionary: This key may contain a program that is written # to a file in the filesystem of the guest on each boot and then @@ -240,7 +302,7 @@ class DataSourceSmartOS(sources.DataSource): # Handle the cloud-init regular meta if not md['local-hostname']: - md['local-hostname'] = system_uuid + md['local-hostname'] = md['instance-id'] ud = None if md['user-data']: @@ -257,6 +319,8 @@ class DataSourceSmartOS(sources.DataSource): self.metadata = util.mergemanydict([md, self.metadata]) self.userdata_raw = ud self.vendordata_raw = md['vendor-data'] + + self._set_provisioned() return True def device_name_to_device(self, name): @@ -268,40 +332,64 @@ class DataSourceSmartOS(sources.DataSource): def get_instance_id(self): return self.metadata['instance-id'] - def query(self, noun, strip=False, default=None, b64=None): + def query(self, noun, seed_file, strip=False, default=None, b64=None): if b64 is None: if noun in self.smartos_no_base64: b64 = False elif self.b64_all or noun in self.b64_keys: b64 = True - return query_data(noun=noun, strip=strip, seed_device=self.seed, - seed_timeout=self.seed_timeout, default=default, - b64=b64) + return self._query_data(noun, seed_file, strip=strip, + default=default, b64=b64) + def _query_data(self, noun, seed_file, strip=False, + default=None, b64=None): + """Makes a request via "GET " -def device_exists(device): - """Symplistic method to determine if the device exists or not""" - return os.path.exists(device) + In the response, the first line is the status, while subsequent + lines are is the value. A blank line with a "." is used to + indicate end of response. + If the response is expected to be base64 encoded, then set + b64encoded to true. Unfortantely, there is no way to know if + something is 100% encoded, so this method relies on being told + if the data is base64 or not. + """ -def get_serial(seed_device, seed_timeout): - """This is replaced in unit testing, allowing us to replace - serial.Serial with a mocked class. + if not noun: + return False - The timeout value of 60 seconds should never be hit. The value - is taken from SmartOS own provisioning tools. Since we are reading - each line individually up until the single ".", the transfer is - usually very fast (i.e. microseconds) to get the response. 
- """ - if not seed_device: - raise AttributeError("seed_device value is not set") + response = JoyentMetadataClient(seed_file).get_metadata(noun) + + if response is None: + return default + + if b64 is None: + b64 = self._query_data('b64-%s' % noun, seed_file, b64=False, + default=False, strip=True) + b64 = util.is_true(b64) + + resp = None + if b64 or strip: + resp = "".join(response).rstrip() + else: + resp = "".join(response) - ser = serial.Serial(seed_device, timeout=seed_timeout) - if not ser.isOpen(): - raise SystemError("Unable to open %s" % seed_device) + if b64: + try: + return util.b64d(resp) + # Bogus input produces different errors in Python 2 and 3; + # catch both. + except (TypeError, binascii.Error): + LOG.warn("Failed base64 decoding key '%s'", noun) + return resp - return ser + return resp + + +def device_exists(device): + """Symplistic method to determine if the device exists or not""" + return os.path.exists(device) class JoyentMetadataFetchException(Exception): @@ -320,8 +408,8 @@ class JoyentMetadataClient(object): r' (?P(?P[0-9a-f]+) (?PSUCCESS|NOTFOUND)' r'( (?P.+))?)') - def __init__(self, serial): - self.serial = serial + def __init__(self, metasource): + self.metasource = metasource def _checksum(self, body): return '{0:08x}'.format( @@ -356,67 +444,30 @@ class JoyentMetadataClient(object): util.b64e(metadata_key)) msg = 'V2 {0} {1} {2}\n'.format( len(message_body), self._checksum(message_body), message_body) - LOG.debug('Writing "%s" to serial port.', msg) - self.serial.write(msg.encode('ascii')) - response = self.serial.readline().decode('ascii') - LOG.debug('Read "%s" from serial port.', response) - return self._get_value_from_frame(request_id, response) - - -def query_data(noun, seed_device, seed_timeout, strip=False, default=None, - b64=None): - """Makes a request to via the serial console via "GET " - - In the response, the first line is the status, while subsequent lines - are is the value. A blank line with a "." is used to indicate end of - response. - - If the response is expected to be base64 encoded, then set b64encoded - to true. Unfortantely, there is no way to know if something is 100% - encoded, so this method relies on being told if the data is base64 or - not. - """ - if not noun: - return False - - with contextlib.closing(get_serial(seed_device, seed_timeout)) as ser: - client = JoyentMetadataClient(ser) - response = client.get_metadata(noun) - - if response is None: - return default - - if b64 is None: - b64 = query_data('b64-%s' % noun, seed_device=seed_device, - seed_timeout=seed_timeout, b64=False, - default=False, strip=True) - b64 = util.is_true(b64) - - resp = None - if b64 or strip: - resp = "".join(response).rstrip() - else: - resp = "".join(response) - - if b64: - try: - return util.b64d(resp) - # Bogus input produces different errors in Python 2 and 3; catch both. 
- except (TypeError, binascii.Error): - LOG.warn("Failed base64 decoding key '%s'", noun) - return resp + LOG.debug('Writing "%s" to metadata transport.', msg) + self.metasource.write(msg.encode('ascii')) + self.metasource.flush() + + response = bytearray() + response.extend(self.metasource.read(1)) + while response[-1:] != b'\n': + response.extend(self.metasource.read(1)) + response = response.rstrip().decode('ascii') + LOG.debug('Read "%s" from metadata transport.', response) + + if 'SUCCESS' not in response: + return None - return resp + return self._get_value_from_frame(request_id, response) def dmi_data(): - sys_uuid = util.read_dmi_data("system-uuid") sys_type = util.read_dmi_data("system-product-name") - if not sys_uuid or not sys_type: + if not sys_type: return None - return (sys_uuid.lower(), sys_type) + return sys_type def write_boot_content(content, content_f, link=None, shebang=False, diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt index 3bde4aac..2651c027 100644 --- a/doc/examples/cloud-config-datasources.txt +++ b/doc/examples/cloud-config-datasources.txt @@ -51,12 +51,19 @@ datasource: policy: on # [can be 'on', 'off' or 'force'] SmartOS: + # For KVM guests: # Smart OS datasource works over a serial console interacting with # a server on the other end. By default, the second serial console is the # device. SmartOS also uses a serial timeout of 60 seconds. serial_device: /dev/ttyS1 serial_timeout: 60 + # For LX-Brand Zones guests: + # Smart OS datasource works over a socket interacting with + # the host on the other end. By default, the socket file is in + # the native .zoncontrol directory. + metadata_sockfile: /native/.zonecontrol/metadata.sock + # a list of keys that will not be base64 decoded even if base64_all no_base64_decode: ['root_authorized_keys', 'motd_sys_info', 'iptables_disable'] diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index adee9019..1235436d 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -31,6 +31,7 @@ import shutil import stat import tempfile import uuid +import unittest from binascii import crc32 import serial @@ -56,12 +57,13 @@ MOCK_RETURNS = { 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), 'sdc:datacenter_name': 'somewhere2', 'sdc:operator-script': '\n'.join(['bin/true', '']), + 'sdc:uuid': str(uuid.uuid4()), 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']), 'user-data': '\n'.join(['something', '']), 'user-script': '\n'.join(['/bin/true', '']), } -DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc') +DMI_DATA_RETURN = 'smartdc' def get_mock_client(mockdata): @@ -111,7 +113,8 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): ret = apply_patches(patches) self.unapply += ret - def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None): + def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None, + is_lxbrand=False): mod = DataSourceSmartOS if mockdata is None: @@ -124,9 +127,13 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): return dmi_data def _os_uname(): - # LP: #1243287. tests assume this runs, but running test on - # arm would cause them all to fail. - return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', 'x86_64') + if not is_lxbrand: + # LP: #1243287. tests assume this runs, but running test on + # arm would cause them all to fail. 
+ return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', 'x86_64') + else: + return ('LINUX', 'NODENAME', 'RELEASE', 'BRANDZ VIRTUAL LINUX', + 'X86_64') if sys_cfg is None: sys_cfg = {} @@ -136,7 +143,6 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): sys_cfg['datasource']['SmartOS'] = ds_cfg self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)]) - self.apply_patches([(mod, 'get_serial', mock.MagicMock())]) self.apply_patches([ (mod, 'JoyentMetadataClient', get_mock_client(mockdata))]) self.apply_patches([(mod, 'dmi_data', _dmi_data)]) @@ -144,6 +150,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): self.apply_patches([(mod, 'device_exists', lambda d: True)]) dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None, paths=self.paths) + self.apply_patches([(dsrc, '_get_seed_file_object', mock.MagicMock())]) return dsrc def test_seed(self): @@ -151,14 +158,29 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): dsrc = self._get_ds() ret = dsrc.get_data() self.assertTrue(ret) + self.assertEquals('kvm', dsrc.smartos_type) self.assertEquals('/dev/ttyS1', dsrc.seed) + def test_seed_lxbrand(self): + # default seed should be /dev/ttyS1 + dsrc = self._get_ds(is_lxbrand=True) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals('lx-brand', dsrc.smartos_type) + self.assertEquals('/native/.zonecontrol/metadata.sock', dsrc.seed) + def test_issmartdc(self): dsrc = self._get_ds() ret = dsrc.get_data() self.assertTrue(ret) self.assertTrue(dsrc.is_smartdc) + def test_issmartdc_lxbrand(self): + dsrc = self._get_ds(is_lxbrand=True) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertTrue(dsrc.is_smartdc) + def test_no_base64(self): ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True} dsrc = self._get_ds(ds_cfg=ds_cfg) @@ -169,7 +191,8 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(DMI_DATA_RETURN[0], dsrc.metadata['instance-id']) + self.assertEquals(MOCK_RETURNS['sdc:uuid'], + dsrc.metadata['instance-id']) def test_root_keys(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) @@ -407,18 +430,6 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase): self.assertEqual(dsrc.device_name_to_device('FOO'), mydscfg['disk_aliases']['FOO']) - @mock.patch('cloudinit.sources.DataSourceSmartOS.JoyentMetadataClient') - @mock.patch('cloudinit.sources.DataSourceSmartOS.get_serial') - def test_serial_console_closed_on_error(self, get_serial, metadata_client): - class OurException(Exception): - pass - metadata_client.side_effect = OurException - try: - DataSourceSmartOS.query_data('noun', 'device', 0) - except OurException: - pass - self.assertEqual(1, get_serial.return_value.close.call_count) - def apply_patches(patches): ret = [] @@ -447,14 +458,25 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase): } def make_response(): - payload = '' - if self.response_parts['payload']: - payload = ' {0}'.format(self.response_parts['payload']) - del self.response_parts['payload'] - return ( - 'V2 {length} {crc} {request_id} {command}{payload}\n'.format( - payload=payload, **self.response_parts).encode('ascii')) - self.serial.readline.side_effect = make_response + payloadstr = '' + if 'payload' in self.response_parts: + payloadstr = ' {0}'.format(self.response_parts['payload']) + return ('V2 {length} {crc} {request_id} ' + '{command}{payloadstr}\n'.format( + payloadstr=payloadstr, + 
**self.response_parts).encode('ascii')) + + self.metasource_data = None + + def read_response(length): + if not self.metasource_data: + self.metasource_data = make_response() + self.metasource_data_len = len(self.metasource_data) + resp = self.metasource_data[:length] + self.metasource_data = self.metasource_data[length:] + return resp + + self.serial.read.side_effect = read_response self.patched_funcs.enter_context( mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint', mock.Mock(return_value=self.request_id))) @@ -477,7 +499,9 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase): client.get_metadata('some_key') self.assertEqual(1, self.serial.write.call_count) written_line = self.serial.write.call_args[0][0] - self.assertEndsWith(written_line, b'\n') + print(type(written_line)) + self.assertEndsWith(written_line.decode('ascii'), + b'\n'.decode('ascii')) self.assertEqual(1, written_line.count(b'\n')) def _get_written_line(self, key='some_key'): @@ -489,7 +513,8 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase): self.assertIsInstance(self._get_written_line(), six.binary_type) def test_get_metadata_line_starts_with_v2(self): - self.assertStartsWith(self._get_written_line(), b'V2') + foo = self._get_written_line() + self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii')) def test_get_metadata_uses_get_command(self): parts = self._get_written_line().decode('ascii').strip().split(' ') @@ -526,7 +551,7 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase): def test_get_metadata_reads_a_line(self): client = self._get_client() client.get_metadata('some_key') - self.assertEqual(1, self.serial.readline.call_count) + self.assertEqual(self.metasource_data_len, self.serial.read.call_count) def test_get_metadata_returns_valid_value(self): client = self._get_client() -- cgit v1.2.3 From 39f668e5db8d09c46eee3a5df73a69f8d85ba489 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Tue, 9 Feb 2016 17:54:07 -0800 Subject: - Added the code to configure the NICs. - Added the code to detect VMware Virtual Platform and apply the customization based on the 'Customization Specification File' Pushed into the guest VM. 
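To sketch the end-to-end flow this commit wires up (the NIC name, MAC and
addresses below are illustrative; they are not taken from a real
customization specification), a cust.cfg section such as

    [NIC1]
    MACADDR = 00:50:56:11:22:33
    PRIMARY = yes
    ONBOOT = yes
    BOOTPROTO = static
    IPADDR = 10.20.87.154
    NETMASK = 255.255.252.0
    GATEWAY = 10.20.87.253

is loaded and applied roughly as

    cf = ConfigFile('/tmp/cust.cfg')        # keys flattened to 'NIC1|IPADDR' etc.
    conf = Config(cf)
    NicConfigurator(conf.nics).configure()  # rewrites /etc/network/interfaces

and, assuming the MAC address maps to eth0 on the guest, gen_ipv4() emits a
Debian-style stanza of the form

    auto eth0
    iface eth0 inet static
     address 10.20.87.154
     netmask 255.255.252.0
     gateway 10.20.87.253 metric 0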
--- cloudinit/sources/DataSourceOVF.py | 107 ++++++++- cloudinit/sources/helpers/vmware/imc/config_nic.py | 246 +++++++++++++++++++++ cloudinit/sources/helpers/vmware/imc/nic.py | 28 ++- 3 files changed, 372 insertions(+), 9 deletions(-) create mode 100644 cloudinit/sources/helpers/vmware/imc/config_nic.py (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 58a4b2a2..add7d243 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -24,11 +24,16 @@ from xml.dom import minidom import base64 import os +import shutil import re +import time from cloudinit import log as logging from cloudinit import sources from cloudinit import util +from cloudinit.sources.helpers.vmware.imc.config import Config +from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile +from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator LOG = logging.getLogger(__name__) @@ -50,13 +55,51 @@ class DataSourceOVF(sources.DataSource): found = [] md = {} ud = "" + vmwarePlatformFound = False + vmwareImcConfigFilePath = '' defaults = { "instance-id": "iid-dsovf", } (seedfile, contents) = get_ovf_env(self.paths.seed_dir) - if seedfile: + dmi_info = dmi_data() + system_uuid = "" + system_type = "" + + if dmi_info is False: + LOG.debug("No dmidata utility found") + else: + system_uuid, system_type = tuple(dmi_info) + + if 'vmware' in system_type.lower(): + LOG.debug("VMware Virtual Platform found") + deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so") + if deployPkgPluginPath: + vmwareImcConfigFilePath = util.log_time(logfunc=LOG.debug, + msg="waiting for configuration file", + func=wait_for_imc_cfg_file, + args=("/tmp", "cust.cfg")) + + if vmwareImcConfigFilePath: + LOG.debug("Found VMware DeployPkg Config File Path at %s" % vmwareImcConfigFilePath) + else: + LOG.debug("Didn't find VMware DeployPkg Config File Path") + + if vmwareImcConfigFilePath: + try: + cf = ConfigFile(vmwareImcConfigFilePath) + conf = Config(cf) + (md, ud, cfg) = read_vmware_imc(conf) + nicConfigurator = NicConfigurator(conf.nics) + nicConfigurator.configure() + vmwarePlatformFound = True + except Exception as inst: + LOG.debug("Error while parsing the Customization Config File") + finally: + dirPath = os.path.dirname(vmwareImcConfigFilePath) + shutil.rmtree(dirPath) + elif seedfile: # Found a seed dir seed = os.path.join(self.paths.seed_dir, seedfile) (md, ud, cfg) = read_ovf_environment(contents) @@ -76,7 +119,7 @@ class DataSourceOVF(sources.DataSource): found.append(name) # There was no OVF transports found - if len(found) == 0: + if len(found) == 0 and not vmwarePlatformFound: return False if 'seedfrom' in md and md['seedfrom']: @@ -108,7 +151,7 @@ class DataSourceOVF(sources.DataSource): def get_public_ssh_keys(self): if 'public-keys' not in self.metadata: - return [] + return [] pks = self.metadata['public-keys'] if isinstance(pks, (list)): return pks @@ -129,6 +172,31 @@ class DataSourceOVFNet(DataSourceOVF): self.supported_seed_starts = ("http://", "https://", "ftp://") +def wait_for_imc_cfg_file(directoryPath, filename, maxwait=180, naplen=5): + waited = 0 + + while waited < maxwait: + fileFullPath = search_file(directoryPath, filename) + if fileFullPath: + return fileFullPath + time.sleep(naplen) + waited += naplen + return None + +# This will return a dict with some content +# meta-data, user-data, some config +def read_vmware_imc(config): + md = {} + cfg = {} + ud = "" + if 
config.host_name: + if config.domain_name: + md['local-hostname'] = config.host_name + "." + config.domain_name + else: + md['local-hostname'] = config.host_name + + return (md, ud, cfg) + # This will return a dict with some content # meta-data, user-data, some config def read_ovf_environment(contents): @@ -280,6 +348,39 @@ def get_properties(contents): return props +def dmi_data(): + sys_uuid = util.read_dmi_data("system-uuid") + sys_type = util.read_dmi_data("system-product-name") + + if not sys_uuid or not sys_type: + return None + + return (sys_uuid.lower(), sys_type) + +def search_file(directoryPath, filename): + if not directoryPath or not filename: + return None + + dirs = [] + + if os.path.isdir(directoryPath): + dirs.append(directoryPath) + + while dirs: + dir = dirs.pop() + children = [] + try: + children.extend(os.listdir(dir)) + except: + LOG.debug("Ignoring the error while searching the directory %s" % dir) + for child in children: + childFullPath = os.path.join(dir, child) + if os.path.isdir(childFullPath): + dirs.append(childFullPath) + elif child == filename: + return childFullPath + + return None class XmlError(Exception): pass diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py new file mode 100644 index 00000000..8e2fc5d3 --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -0,0 +1,246 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2015 Canonical Ltd. +# Copyright (C) 2016 VMware INC. +# +# Author: Sankar Tanguturi +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import logging +import os +import subprocess +import re + +logger = logging.getLogger(__name__) + + +class NicConfigurator: + def __init__(self, nics): + """ + Initialize the Nic Configurator + @param nics (list) an array of nics to configure + """ + self.nics = nics + self.mac2Name = {} + self.ipv4PrimaryGateway = None + self.ipv6PrimaryGateway = None + self.find_devices() + self._primaryNic = self.get_primary_nic() + + def get_primary_nic(self): + """ + Retrieve the primary nic if it exists + @return (NicBase): the primary nic if exists, None otherwise + """ + primaryNic = None + + for nic in self.nics: + if nic.primary: + if primaryNic: + raise Exception('There can only be one primary nic', + primaryNic.mac, nic.mac) + primaryNic = nic + + return primaryNic + + def find_devices(self): + """ + Create the mac2Name dictionary + The mac address(es) are in the lower case + """ + cmd = 'ip addr show' + outText = subprocess.check_output(cmd, shell=True).decode() + sections = re.split(r'\n\d+: ', '\n' + outText)[1:] + + macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))' + for section in sections: + matcher = re.search(macPat, section) + if not matcher: # Only keep info about nics + continue + mac = matcher.group(1).lower() + name = section.split(':', 1)[0] + self.mac2Name[mac] = name + + def gen_one_nic(self, nic): + """ + Return the lines needed to configure a nic + @return (str list): the string list to configure the nic + @param nic (NicBase): the nic to configure + """ + lines = [] + name = self.mac2Name.get(nic.mac.lower()) + if not name: + raise ValueError('No known device has MACADDR: %s' % nic.mac) + + if nic.onboot: + lines.append('auto %s' % name) + + # Customize IPv4 + lines.extend(self.gen_ipv4(name, nic)) + + # Customize IPv6 + lines.extend(self.gen_ipv6(name, nic)) + + lines.append('') + + return lines + + def gen_ipv4(self, name, nic): + """ + Return the lines needed to configure the IPv4 setting of a nic + @return (str list): the string list to configure the gateways + @param name (str): name of the nic + @param nic (NicBase): the nic to configure + """ + lines = [] + + bootproto = nic.bootProto.lower() + if nic.ipv4_mode.lower() == 'disabled': + bootproto = 'manual' + lines.append('iface %s inet %s' % (name, bootproto)) + + if bootproto != 'static': + return lines + + # Static Ipv4 + v4 = nic.staticIpv4 + if v4.ip: + lines.append(' address %s' % v4.ip) + if v4.netmask: + lines.append(' netmask %s' % v4.netmask) + + # Add the primary gateway + if nic.primary and v4.gateways: + self.ipv4PrimaryGateway = v4.gateways[0] + lines.append(' gateway %s metric 0' % self.ipv4PrimaryGateway) + return lines + + # Add routes if there is no primary nic + if not self._primaryNic: + lines.extend(self.gen_ipv4_route(nic, v4.gateways)) + + return lines + + def gen_ipv4_route(self, nic, gateways): + """ + Return the lines needed to configure additional Ipv4 route + @return (str list): the string list to configure the gateways + @param nic (NicBase): the nic to configure + @param gateways (str list): the list of gateways + """ + lines = [] + + for gateway in gateways: + lines.append(' up route add default gw %s metric 10000' % gateway) + + return lines + + def gen_ipv6(self, name, nic): + """ + Return the lines needed to configure the gateways for a nic + @return (str list): the string list to configure the gateways + @param name (str): name of the nic + @param nic (NicBase): the nic to configure + """ + lines = [] + + if not nic.staticIpv6: + return lines + + # Static Ipv6 + addrs 
= nic.staticIpv6 + lines.append('iface %s inet6 static' % name) + lines.append(' address %s' % addrs[0].ip) + lines.append(' netmask %s' % addrs[0].netmask) + + for addr in addrs[1:]: + lines.append(' up ifconfig %s inet6 add %s/%s' % (name, addr.ip, + addr.netmask)) + # Add the primary gateway + if nic.primary: + for addr in addrs: + if addr.gateway: + self.ipv6PrimaryGateway = addr.gateway + lines.append(' gateway %s' % self.ipv6PrimaryGateway) + return lines + + # Add routes if there is no primary nic + if not self._primaryNic: + lines.extend(self._genIpv6Route(name, nic, addrs)) + + return lines + + def _genIpv6Route(self, name, nic, addrs): + lines = [] + + for addr in addrs: + lines.append(' up route -A inet6 add default gw %s metric 10000' % + addr.gateway) + + return lines + + def generate(self): + """Return the lines that is needed to configure the nics""" + lines = [] + lines.append('iface lo inet loopback') + lines.append('auto lo') + lines.append('') + + for nic in self.nics: + lines.extend(self.gen_one_nic(nic)) + + return lines + + def clear_dhcp(self): + logger.info('Clearing DHCP leases') + + subprocess.call('pkill dhclient', shell=True) + subprocess.check_call('rm -f /var/lib/dhcp/*', shell=True) + + def if_down_up(self): + names = [] + for nic in self.nics: + name = self.mac2Name.get(nic.mac.lower()) + names.append(name) + + for name in names: + logger.info('Bring down interface %s' % name) + subprocess.check_call('ifdown %s' % name, shell=True) + + self.clear_dhcp() + + for name in names: + logger.info('Bring up interface %s' % name) + subprocess.check_call('ifup %s' % name, shell=True) + + def configure(self): + """ + Configure the /etc/network/intefaces + Make a back up of the original + """ + containingDir = '/etc/network' + + interfaceFile = os.path.join(containingDir, 'interfaces') + originalFile = os.path.join(containingDir, + 'interfaces.before_vmware_customization') + + if not os.path.exists(originalFile) and os.path.exists(interfaceFile): + os.rename(interfaceFile, originalFile) + + lines = self.generate() + with open(interfaceFile, 'w') as fp: + for line in lines: + fp.write('%s\n' % line) + + self.if_down_up() diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py index a7594874..6628a3ec 100644 --- a/cloudinit/sources/helpers/vmware/imc/nic.py +++ b/cloudinit/sources/helpers/vmware/imc/nic.py @@ -47,21 +47,37 @@ class Nic(NicBase): @property def primary(self): - value = self._get('PRIMARY').lower() - return value == 'yes' or value == 'true' + value = self._get('PRIMARY') + if value: + value = value.lower() + return value == 'yes' or value == 'true' + else: + return False @property def onboot(self): - value = self._get('ONBOOT').lower() - return value == 'yes' or value == 'true' + value = self._get('ONBOOT') + if value: + value = value.lower() + return value == 'yes' or value == 'true' + else: + return False @property def bootProto(self): - return self._get('BOOTPROTO').lower() + value = self._get('BOOTPROTO') + if value: + return value.lower() + else: + return "" @property def ipv4_mode(self): - return self._get('IPv4_MODE').lower() + value = self._get('IPv4_MODE') + if value: + return value.lower() + else: + return "" @property def staticIpv4(self): -- cgit v1.2.3 From 0ce71cb8975e19677eea415101e15da5f4095cd5 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Tue, 16 Feb 2016 17:34:24 -0800 Subject: - Used proper 4 space indentations for config_nic.py and nic.py - Implemented the 'search_file' function using 
'os.walk()' - Fixed few variable names. - Removed size() function in config_file.py - Updated the test_config_file.py to use len() instead of .size() --- cloudinit/sources/DataSourceOVF.py | 34 +- .../sources/helpers/vmware/imc/config_file.py | 4 - cloudinit/sources/helpers/vmware/imc/config_nic.py | 433 +++++++++++---------- cloudinit/sources/helpers/vmware/imc/nic.py | 20 +- tests/unittests/test_vmware_config_file.py | 4 +- 5 files changed, 238 insertions(+), 257 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index add7d243..6d3bf7bb 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -64,13 +64,12 @@ class DataSourceOVF(sources.DataSource): (seedfile, contents) = get_ovf_env(self.paths.seed_dir) dmi_info = dmi_data() - system_uuid = "" system_type = "" - if dmi_info is False: + if dmi_info is None: LOG.debug("No dmidata utility found") else: - system_uuid, system_type = tuple(dmi_info) + (_, system_type) = dmi_info if 'vmware' in system_type.lower(): LOG.debug("VMware Virtual Platform found") @@ -172,11 +171,11 @@ class DataSourceOVFNet(DataSourceOVF): self.supported_seed_starts = ("http://", "https://", "ftp://") -def wait_for_imc_cfg_file(directoryPath, filename, maxwait=180, naplen=5): +def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5): waited = 0 while waited < maxwait: - fileFullPath = search_file(directoryPath, filename) + fileFullPath = search_file(dirpath, filename) if fileFullPath: return fileFullPath time.sleep(naplen) @@ -357,28 +356,13 @@ def dmi_data(): return (sys_uuid.lower(), sys_type) -def search_file(directoryPath, filename): - if not directoryPath or not filename: +def search_file(dirpath, filename): + if not dirpath or not filename: return None - dirs = [] - - if os.path.isdir(directoryPath): - dirs.append(directoryPath) - - while dirs: - dir = dirs.pop() - children = [] - try: - children.extend(os.listdir(dir)) - except: - LOG.debug("Ignoring the error while searching the directory %s" % dir) - for child in children: - childFullPath = os.path.join(dir, child) - if os.path.isdir(childFullPath): - dirs.append(childFullPath) - elif child == filename: - return childFullPath + for root, dirs, files in os.walk(dirpath): + if filename in files: + return os.path.join(root, filename) return None diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py index 7c47d14c..bb9fb7dc 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_file.py +++ b/cloudinit/sources/helpers/vmware/imc/config_file.py @@ -61,10 +61,6 @@ class ConfigFile(ConfigSource, dict): self[key] = val - def size(self): - """Return the number of properties present.""" - return len(self) - def _loadConfigFile(self, filename): """ Parses properties from the specified config file. 
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 8e2fc5d3..d79e6936 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -26,221 +26,222 @@ logger = logging.getLogger(__name__) class NicConfigurator: - def __init__(self, nics): - """ - Initialize the Nic Configurator - @param nics (list) an array of nics to configure - """ - self.nics = nics - self.mac2Name = {} - self.ipv4PrimaryGateway = None - self.ipv6PrimaryGateway = None - self.find_devices() - self._primaryNic = self.get_primary_nic() - - def get_primary_nic(self): - """ - Retrieve the primary nic if it exists - @return (NicBase): the primary nic if exists, None otherwise - """ - primaryNic = None - - for nic in self.nics: - if nic.primary: - if primaryNic: - raise Exception('There can only be one primary nic', - primaryNic.mac, nic.mac) + def __init__(self, nics): + """ + Initialize the Nic Configurator + @param nics (list) an array of nics to configure + """ + self.nics = nics + self.mac2Name = {} + self.ipv4PrimaryGateway = None + self.ipv6PrimaryGateway = None + self.find_devices() + self._primaryNic = self.get_primary_nic() + + def get_primary_nic(self): + """ + Retrieve the primary nic if it exists + @return (NicBase): the primary nic if exists, None otherwise + """ + primaryNic = None + + for nic in self.nics: + if nic.primary: + if primaryNic: + raise Exception('There can only be one primary nic', + primaryNic.mac, nic.mac) primaryNic = nic - return primaryNic - - def find_devices(self): - """ - Create the mac2Name dictionary - The mac address(es) are in the lower case - """ - cmd = 'ip addr show' - outText = subprocess.check_output(cmd, shell=True).decode() - sections = re.split(r'\n\d+: ', '\n' + outText)[1:] - - macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))' - for section in sections: - matcher = re.search(macPat, section) - if not matcher: # Only keep info about nics - continue - mac = matcher.group(1).lower() - name = section.split(':', 1)[0] - self.mac2Name[mac] = name - - def gen_one_nic(self, nic): - """ - Return the lines needed to configure a nic - @return (str list): the string list to configure the nic - @param nic (NicBase): the nic to configure - """ - lines = [] - name = self.mac2Name.get(nic.mac.lower()) - if not name: - raise ValueError('No known device has MACADDR: %s' % nic.mac) - - if nic.onboot: - lines.append('auto %s' % name) - - # Customize IPv4 - lines.extend(self.gen_ipv4(name, nic)) - - # Customize IPv6 - lines.extend(self.gen_ipv6(name, nic)) - - lines.append('') - - return lines - - def gen_ipv4(self, name, nic): - """ - Return the lines needed to configure the IPv4 setting of a nic - @return (str list): the string list to configure the gateways - @param name (str): name of the nic - @param nic (NicBase): the nic to configure - """ - lines = [] - - bootproto = nic.bootProto.lower() - if nic.ipv4_mode.lower() == 'disabled': - bootproto = 'manual' - lines.append('iface %s inet %s' % (name, bootproto)) - - if bootproto != 'static': - return lines - - # Static Ipv4 - v4 = nic.staticIpv4 - if v4.ip: - lines.append(' address %s' % v4.ip) - if v4.netmask: - lines.append(' netmask %s' % v4.netmask) - - # Add the primary gateway - if nic.primary and v4.gateways: - self.ipv4PrimaryGateway = v4.gateways[0] - lines.append(' gateway %s metric 0' % self.ipv4PrimaryGateway) - return lines - - # Add routes if there is no primary nic - if not 
self._primaryNic: - lines.extend(self.gen_ipv4_route(nic, v4.gateways)) - - return lines - - def gen_ipv4_route(self, nic, gateways): - """ - Return the lines needed to configure additional Ipv4 route - @return (str list): the string list to configure the gateways - @param nic (NicBase): the nic to configure - @param gateways (str list): the list of gateways - """ - lines = [] - - for gateway in gateways: - lines.append(' up route add default gw %s metric 10000' % gateway) - - return lines - - def gen_ipv6(self, name, nic): - """ - Return the lines needed to configure the gateways for a nic - @return (str list): the string list to configure the gateways - @param name (str): name of the nic - @param nic (NicBase): the nic to configure - """ - lines = [] - - if not nic.staticIpv6: - return lines - - # Static Ipv6 - addrs = nic.staticIpv6 - lines.append('iface %s inet6 static' % name) - lines.append(' address %s' % addrs[0].ip) - lines.append(' netmask %s' % addrs[0].netmask) - - for addr in addrs[1:]: - lines.append(' up ifconfig %s inet6 add %s/%s' % (name, addr.ip, - addr.netmask)) - # Add the primary gateway - if nic.primary: - for addr in addrs: - if addr.gateway: - self.ipv6PrimaryGateway = addr.gateway - lines.append(' gateway %s' % self.ipv6PrimaryGateway) - return lines - - # Add routes if there is no primary nic - if not self._primaryNic: - lines.extend(self._genIpv6Route(name, nic, addrs)) - - return lines - - def _genIpv6Route(self, name, nic, addrs): - lines = [] - - for addr in addrs: - lines.append(' up route -A inet6 add default gw %s metric 10000' % - addr.gateway) - - return lines - - def generate(self): - """Return the lines that is needed to configure the nics""" - lines = [] - lines.append('iface lo inet loopback') - lines.append('auto lo') - lines.append('') - - for nic in self.nics: - lines.extend(self.gen_one_nic(nic)) - - return lines - - def clear_dhcp(self): - logger.info('Clearing DHCP leases') - - subprocess.call('pkill dhclient', shell=True) - subprocess.check_call('rm -f /var/lib/dhcp/*', shell=True) - - def if_down_up(self): - names = [] - for nic in self.nics: - name = self.mac2Name.get(nic.mac.lower()) - names.append(name) - - for name in names: - logger.info('Bring down interface %s' % name) - subprocess.check_call('ifdown %s' % name, shell=True) - - self.clear_dhcp() - - for name in names: - logger.info('Bring up interface %s' % name) - subprocess.check_call('ifup %s' % name, shell=True) - - def configure(self): - """ - Configure the /etc/network/intefaces - Make a back up of the original - """ - containingDir = '/etc/network' - - interfaceFile = os.path.join(containingDir, 'interfaces') - originalFile = os.path.join(containingDir, - 'interfaces.before_vmware_customization') - - if not os.path.exists(originalFile) and os.path.exists(interfaceFile): - os.rename(interfaceFile, originalFile) - - lines = self.generate() - with open(interfaceFile, 'w') as fp: - for line in lines: - fp.write('%s\n' % line) - - self.if_down_up() + return primaryNic + + def find_devices(self): + """ + Create the mac2Name dictionary + The mac address(es) are in the lower case + """ + cmd = 'ip addr show' + outText = subprocess.check_output(cmd, shell=True).decode() + sections = re.split(r'\n\d+: ', '\n' + outText)[1:] + + macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))' + for section in sections: + matcher = re.search(macPat, section) + if not matcher: # Only keep info about nics + continue + mac = matcher.group(1).lower() + name = section.split(':', 1)[0] + 
self.mac2Name[mac] = name
+
+    def gen_one_nic(self, nic):
+        """
+        Return the lines needed to configure a nic
+        @return (str list): the string list to configure the nic
+        @param nic (NicBase): the nic to configure
+        """
+        lines = []
+        name = self.mac2Name.get(nic.mac.lower())
+        if not name:
+            raise ValueError('No known device has MACADDR: %s' % nic.mac)
+
+        if nic.onboot:
+            lines.append('auto %s' % name)
+
+        # Customize IPv4
+        lines.extend(self.gen_ipv4(name, nic))
+
+        # Customize IPv6
+        lines.extend(self.gen_ipv6(name, nic))
+
+        lines.append('')
+
+        return lines
+
+    def gen_ipv4(self, name, nic):
+        """
+        Return the lines needed to configure the IPv4 setting of a nic
+        @return (str list): the string list to configure the gateways
+        @param name (str): name of the nic
+        @param nic (NicBase): the nic to configure
+        """
+        lines = []
+
+        bootproto = nic.bootProto.lower()
+        if nic.ipv4_mode.lower() == 'disabled':
+            bootproto = 'manual'
+        lines.append('iface %s inet %s' % (name, bootproto))
+
+        if bootproto != 'static':
+            return lines
+
+        # Static Ipv4
+        v4 = nic.staticIpv4
+        if v4.ip:
+            lines.append('    address %s' % v4.ip)
+        if v4.netmask:
+            lines.append('    netmask %s' % v4.netmask)
+
+        # Add the primary gateway
+        if nic.primary and v4.gateways:
+            self.ipv4PrimaryGateway = v4.gateways[0]
+            lines.append('    gateway %s metric 0' % self.ipv4PrimaryGateway)
+            return lines
+
+        # Add routes if there is no primary nic
+        if not self._primaryNic:
+            lines.extend(self.gen_ipv4_route(nic, v4.gateways))
+
+        return lines
+
+    def gen_ipv4_route(self, nic, gateways):
+        """
+        Return the lines needed to configure additional Ipv4 route
+        @return (str list): the string list to configure the gateways
+        @param nic (NicBase): the nic to configure
+        @param gateways (str list): the list of gateways
+        """
+        lines = []
+
+        for gateway in gateways:
+            lines.append('    up route add default gw %s metric 10000' %
+                         gateway)
+
+        return lines
+
+    def gen_ipv6(self, name, nic):
+        """
+        Return the lines needed to configure the gateways for a nic
+        @return (str list): the string list to configure the gateways
+        @param name (str): name of the nic
+        @param nic (NicBase): the nic to configure
+        """
+        lines = []
+
+        if not nic.staticIpv6:
+            return lines
+
+        # Static Ipv6
+        addrs = nic.staticIpv6
+        lines.append('iface %s inet6 static' % name)
+        lines.append('    address %s' % addrs[0].ip)
+        lines.append('    netmask %s' % addrs[0].netmask)
+
+        for addr in addrs[1:]:
+            lines.append('    up ifconfig %s inet6 add %s/%s' % (name, addr.ip,
+                                                                 addr.netmask))
+        # Add the primary gateway
+        if nic.primary:
+            for addr in addrs:
+                if addr.gateway:
+                    self.ipv6PrimaryGateway = addr.gateway
+                    lines.append('    gateway %s' % self.ipv6PrimaryGateway)
+                    return lines
+
+        # Add routes if there is no primary nic
+        if not self._primaryNic:
+            lines.extend(self._genIpv6Route(name, nic, addrs))
+
+        return lines
+
+    def _genIpv6Route(self, name, nic, addrs):
+        lines = []
+
+        for addr in addrs:
+            lines.append('    up route -A inet6 add default gw %s metric 10000' %
+                         addr.gateway)
+
+        return lines
+
+    def generate(self):
+        """Return the lines that are needed to configure the nics"""
+        lines = []
+        lines.append('iface lo inet loopback')
+        lines.append('auto lo')
+        lines.append('')
+
+        for nic in self.nics:
+            lines.extend(self.gen_one_nic(nic))
+
+        return lines
+
+    def clear_dhcp(self):
+        logger.info('Clearing DHCP leases')
+
+        subprocess.call('pkill dhclient', shell=True)
+        subprocess.check_call('rm -f /var/lib/dhcp/*', shell=True)
+
+    def if_down_up(self):
+        names = []
+        for nic in self.nics:
+            name = self.mac2Name.get(nic.mac.lower())
+            names.append(name)
+
+        for name in names:
+            logger.info('Bring down interface %s' % name)
+            subprocess.check_call('ifdown %s' % name, shell=True)
+
+        self.clear_dhcp()
+
+        for name in names:
+            logger.info('Bring up interface %s' % name)
+            subprocess.check_call('ifup %s' % name, shell=True)
+
+    def configure(self):
+        """
+        Configure the /etc/network/interfaces
+        Make a backup of the original
+        """
+        containingDir = '/etc/network'
+
+        interfaceFile = os.path.join(containingDir, 'interfaces')
+        originalFile = os.path.join(containingDir,
+                                    'interfaces.before_vmware_customization')
+
+        if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
+            os.rename(interfaceFile, originalFile)
+
+        lines = self.generate()
+        with open(interfaceFile, 'w') as fp:
+            for line in lines:
+                fp.write('%s\n' % line)
+
+        self.if_down_up()
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
index 6628a3ec..b5d704ea 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -49,35 +49,35 @@ class Nic(NicBase):
     def primary(self):
         value = self._get('PRIMARY')
         if value:
-           value = value.lower()
-           return value == 'yes' or value == 'true'
+            value = value.lower()
+            return value == 'yes' or value == 'true'
         else:
-           return False
+            return False
 
     @property
     def onboot(self):
         value = self._get('ONBOOT')
         if value:
-           value = value.lower()
-           return value == 'yes' or value == 'true'
+            value = value.lower()
+            return value == 'yes' or value == 'true'
         else:
-           return False
+            return False
 
     @property
     def bootProto(self):
         value = self._get('BOOTPROTO')
         if value:
-           return value.lower()
+            return value.lower()
         else:
-           return ""
+            return ""
 
     @property
     def ipv4_mode(self):
         value = self._get('IPv4_MODE')
         if value:
-           return value.lower()
+            return value.lower()
         else:
-           return ""
+            return ""
 
     @property
     def staticIpv4(self):
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
index 51166dd7..d5c7367b 100644
--- a/tests/unittests/test_vmware_config_file.py
+++ b/tests/unittests/test_vmware_config_file.py
@@ -36,12 +36,12 @@ class TestVmwareConfigFile(unittest.TestCase):
 
         cf.clear()
 
-        self.assertEqual(0, cf.size(), "clear size")
+        self.assertEqual(0, len(cf), "clear size")
 
         cf._insertKey(" PASSWORD|-PASS ", " foo ")
         cf._insertKey("BAR", " ")
 
-        self.assertEqual(2, cf.size(), "insert size")
+        self.assertEqual(2, len(cf), "insert size")
         self.assertEqual('foo', cf["PASSWORD|-PASS"], "password")
         self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword")
         self.assertFalse(cf.should_keep_current_value("PASSWORD|-PASS"),
--
cgit v1.2.3


From c5d2f79a982258d86181368b25ce6bc6638ef645 Mon Sep 17 00:00:00 2001
From: Sankar Tanguturi
Date: Thu, 18 Feb 2016 18:31:07 -0800
Subject: - Removed dmi_data function.
 - Fixed a few variable names.
 - Used util.subp methods for process-related manipulations.
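As a sketch of the subprocess-to-util.subp conversion made below: util.subp() takes an argument vector, returns an (out, err) tuple, and raises util.ProcessExecutionError on an unexpected exit code. The rcs list and the Python-side glob here are assumptions added for illustration, not part of the patch:

import glob

from cloudinit import util


def clear_dhcp():
    # pkill exits 1 when nothing matched; accepting rc 1 via rcs keeps
    # that case from raising (assumption: the old subprocess.call
    # ignored the return code entirely).
    util.subp(['pkill', 'dhclient'], rcs=[0, 1])
    # Without shell=True, a pattern like /var/lib/dhcp/* reaches rm
    # literally, so expand it in Python to keep the old semantics.
    leases = glob.glob('/var/lib/dhcp/*')
    if leases:
        util.subp(['rm', '-f'] + leases)

Note that the hunk below passes the literal '/var/lib/dhcp/*' to rm, which no shell will expand; the glob above is one way to preserve the previous behaviour.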
--- cloudinit/sources/DataSourceOVF.py | 20 +++-------- cloudinit/sources/helpers/vmware/imc/config_nic.py | 40 +++++++++++----------- 2 files changed, 24 insertions(+), 36 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 6d3bf7bb..72ba5aba 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -63,15 +63,11 @@ class DataSourceOVF(sources.DataSource): } (seedfile, contents) = get_ovf_env(self.paths.seed_dir) - dmi_info = dmi_data() - system_type = "" - if dmi_info is None: - LOG.debug("No dmidata utility found") - else: - (_, system_type) = dmi_info - - if 'vmware' in system_type.lower(): + system_type = util.read_dmi_data("system-product-name") + if system_type is None: + LOG.debug("No system-product-name found") + elif 'vmware' in system_type.lower(): LOG.debug("VMware Virtual Platform found") deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so") if deployPkgPluginPath: @@ -347,14 +343,6 @@ def get_properties(contents): return props -def dmi_data(): - sys_uuid = util.read_dmi_data("system-uuid") - sys_type = util.read_dmi_data("system-product-name") - - if not sys_uuid or not sys_type: - return None - - return (sys_uuid.lower(), sys_type) def search_file(dirpath, filename): if not dirpath or not filename: diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index d79e6936..172a1649 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -22,6 +22,8 @@ import os import subprocess import re +from cloudinit import util + logger = logging.getLogger(__name__) @@ -43,32 +45,30 @@ class NicConfigurator: Retrieve the primary nic if it exists @return (NicBase): the primary nic if exists, None otherwise """ - primaryNic = None - - for nic in self.nics: - if nic.primary: - if primaryNic: - raise Exception('There can only be one primary nic', - primaryNic.mac, nic.mac) - primaryNic = nic - - return primaryNic + primary_nics = [nic for nic in self.nics if nic.primary] + if not primary_nics: + return None + elif len(primary_nics) > 1: + raise Exception('There can only be one primary nic', + [nic.mac for nic in primary_nics]) + else: + return primary_nics[0] def find_devices(self): """ Create the mac2Name dictionary The mac address(es) are in the lower case """ - cmd = 'ip addr show' - outText = subprocess.check_output(cmd, shell=True).decode() - sections = re.split(r'\n\d+: ', '\n' + outText)[1:] + cmd = ['ip', 'addr', 'show'] + (output, err) = util.subp(cmd) + sections = re.split(r'\n\d+: ', '\n' + output)[1:] macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))' for section in sections: - matcher = re.search(macPat, section) - if not matcher: # Only keep info about nics + match = re.search(macPat, section) + if not match: # Only keep info about nics continue - mac = matcher.group(1).lower() + mac = match.group(1).lower() name = section.split(':', 1)[0] self.mac2Name[mac] = name @@ -206,8 +206,8 @@ class NicConfigurator: def clear_dhcp(self): logger.info('Clearing DHCP leases') - subprocess.call('pkill dhclient', shell=True) - subprocess.check_call('rm -f /var/lib/dhcp/*', shell=True) + util.subp(["pkill", "dhclient"]) + util.subp(["rm", "-f", "/var/lib/dhcp/*"]) def if_down_up(self): names = [] @@ -217,13 +217,13 @@ class NicConfigurator: for name in names: logger.info('Bring down interface %s' % name) - 
subprocess.check_call('ifdown %s' % name, shell=True) + util.subp(["ifdown", "%s" % name]) self.clear_dhcp() for name in names: logger.info('Bring up interface %s' % name) - subprocess.check_call('ifup %s' % name, shell=True) + util.subp(["ifup", "%s" % name]) def configure(self): """ -- cgit v1.2.3 From 51a27968ae9805c747cdc27d35a31c49df6d2217 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Tue, 1 Mar 2016 16:43:50 -0800 Subject: Added a kill switch for customization on VMware platform. The customization is set to False by default and is triggered only when the option disable_vmware_customization is set to false in /etc/cloud/cloud.cfg --- cloudinit/sources/DataSourceOVF.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 72ba5aba..d92c128c 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -68,18 +68,23 @@ class DataSourceOVF(sources.DataSource): if system_type is None: LOG.debug("No system-product-name found") elif 'vmware' in system_type.lower(): - LOG.debug("VMware Virtual Platform found") - deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so") - if deployPkgPluginPath: - vmwareImcConfigFilePath = util.log_time(logfunc=LOG.debug, - msg="waiting for configuration file", - func=wait_for_imc_cfg_file, - args=("/tmp", "cust.cfg")) - - if vmwareImcConfigFilePath: - LOG.debug("Found VMware DeployPkg Config File Path at %s" % vmwareImcConfigFilePath) + LOG.debug("VMware Virtualization Platform found") + if not util.get_cfg_option_bool(self.sys_cfg, + "disable_vmware_customization", + True): + deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so") + if deployPkgPluginPath: + vmwareImcConfigFilePath = util.log_time(logfunc=LOG.debug, + msg="waiting for configuration file", + func=wait_for_imc_cfg_file, + args=("/tmp", "cust.cfg")) + + if vmwareImcConfigFilePath: + LOG.debug("Found VMware DeployPkg Config File Path at %s" % vmwareImcConfigFilePath) + else: + LOG.debug("Did not find VMware DeployPkg Config File Path") else: - LOG.debug("Didn't find VMware DeployPkg Config File Path") + LOG.debug("Customization for VMware platform is disabled.") if vmwareImcConfigFilePath: try: -- cgit v1.2.3 From ab6f166da7290928d56ff3c62a5280536e1d241f Mon Sep 17 00:00:00 2001 From: root Date: Wed, 2 Mar 2016 08:51:16 +0000 Subject: Added Bigstep datasource. --- cloudinit/sources/DataSourceBigstep.py | 48 ++++++++++++++++++++++++++++++++++ 1 file changed, 48 insertions(+) create mode 100644 cloudinit/sources/DataSourceBigstep.py (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py new file mode 100644 index 00000000..67d43eb3 --- /dev/null +++ b/cloudinit/sources/DataSourceBigstep.py @@ -0,0 +1,48 @@ +# +# Copyright (C) 2015-2016 Bigstep Cloud Ltd. 
+# +# Author: Alexandru Sirbu +# + +import json + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util +from cloudinit import url_helper + +LOG = logging.getLogger(__name__) + + +class DataSourceBigstep(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.metadata = {} + self.vendordata_raw = "" + self.userdata_raw = "" + + + def get_data(self, apply_filter=False): + url = get_url_from_file() + response = url_helper.readurl(url) + decoded = json.loads(response.contents) + self.metadata = decoded["metadata"] + self.vendordata_raw = decoded["vendordata_raw"] + self.userdata_raw = decoded["userdata_raw"] + return True + + +def get_url_from_file(): + content = util.load_file("/var/lib/cloud/data/seed/bigstep/url") + return content + +# Used to match classes to dependencies +datasources = [ + (DataSourceBigstep, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + -- cgit v1.2.3 From d5d89cfb1e61e6cc3f732a18ec1aa4d2b288489d Mon Sep 17 00:00:00 2001 From: root Date: Wed, 2 Mar 2016 08:53:47 +0000 Subject: Pep8 changes to Bigstep datasource. --- cloudinit/sources/DataSourceBigstep.py | 2 -- 1 file changed, 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py index 67d43eb3..c22ffdb6 100644 --- a/cloudinit/sources/DataSourceBigstep.py +++ b/cloudinit/sources/DataSourceBigstep.py @@ -21,7 +21,6 @@ class DataSourceBigstep(sources.DataSource): self.vendordata_raw = "" self.userdata_raw = "" - def get_data(self, apply_filter=False): url = get_url_from_file() response = url_helper.readurl(url) @@ -45,4 +44,3 @@ datasources = [ # Return a list of data sources that match this set of dependencies def get_datasource_list(depends): return sources.list_from_depends(depends, datasources) - -- cgit v1.2.3 From c496b6a11d504ef62371cb5e03ac80b4ceb37540 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 3 Mar 2016 12:20:48 -0500 Subject: run pyflakes in more places, fix fallout this makes 'make' run pyflakes, so failures there will stop a build. also adds it to tox. --- Makefile | 6 ++++-- cloudinit/sources/DataSourceOVF.py | 3 ++- cloudinit/sources/helpers/vmware/imc/config_nic.py | 1 - cloudinit/util.py | 2 +- tests/unittests/test_datasource/test_azure_helper.py | 2 -- tests/unittests/test_datasource/test_smartos.py | 1 - tests/unittests/test_handler/test_handler_power_state.py | 2 +- tox.ini | 6 +++++- 8 files changed, 13 insertions(+), 10 deletions(-) (limited to 'cloudinit/sources') diff --git a/Makefile b/Makefile index bb0c5253..8987d51c 100644 --- a/Makefile +++ b/Makefile @@ -14,13 +14,15 @@ ifeq ($(distro),) distro = redhat endif -all: test check_version +all: check + +check: test check_version pyflakes pep8: @$(CWD)/tools/run-pep8 $(PY_FILES) pyflakes: - @$(CWD)/tools/tox-venv py34 pyflakes $(PY_FILES) + @pyflakes $(PY_FILES) pip-requirements: @echo "Installing cloud-init dependencies..." 
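Part of the fallout fixed below is in test_handler_power_state.py, where `if 'condition' is None:` becomes `if condition is None:`. A small hypothetical sketch of why a static checker surfaces this class of bug: the string literal is never None, so the guarded branch is dead and the local name can end up unused, which pyflakes reports:

def check_result(result):
    condition = result.get('condition')  # hypothetical accessor

    # Buggy form fixed below: `if 'condition' is None:` tests a string
    # *literal*, which is never None, leaving `condition` unused.
    if condition is None:
        return 'condition was not returned'
    return condition


print(check_result({}))  # -> condition was not returned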
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 72ba5aba..d12601a4 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -90,7 +90,8 @@ class DataSourceOVF(sources.DataSource): nicConfigurator.configure() vmwarePlatformFound = True except Exception as inst: - LOG.debug("Error while parsing the Customization Config File") + LOG.debug("Error while parsing the Customization " + "Config File: %s", inst) finally: dirPath = os.path.dirname(vmwareImcConfigFilePath) shutil.rmtree(dirPath) diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 172a1649..6d721134 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -19,7 +19,6 @@ import logging import os -import subprocess import re from cloudinit import util diff --git a/cloudinit/util.py b/cloudinit/util.py index 45d49e66..0a639bb9 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -2147,7 +2147,7 @@ def _read_dmi_syspath(key): LOG.debug("dmi data %s returned %s", dmi_key_path, key_data) return key_data.strip() - except Exception as e: + except Exception: logexc(LOG, "failed read of %s", dmi_key_path) return None diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py index 8dbdfb0b..1134199b 100644 --- a/tests/unittests/test_datasource/test_azure_helper.py +++ b/tests/unittests/test_datasource/test_azure_helper.py @@ -1,6 +1,4 @@ import os -import struct -import unittest from cloudinit.sources.helpers import azure as azure_helper from ..helpers import TestCase diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 1235436d..ccb9f080 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -31,7 +31,6 @@ import shutil import stat import tempfile import uuid -import unittest from binascii import crc32 import serial diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py index 5687b10d..cd376e9c 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -107,7 +107,7 @@ def check_lps_ret(psc_return, mode=None): if 'shutdown' not in psc_return[0][0]: errs.append("string 'shutdown' not in cmd") - if 'condition' is None: + if condition is None: errs.append("condition was not returned") if mode is not None: diff --git a/tox.ini b/tox.ini index b72df0c9..fd65f6ef 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py27,py3 +envlist = py27,py3,pyflakes recreate = True [testenv] @@ -10,6 +10,10 @@ deps = -r{toxinidir}/test-requirements.txt [testenv:py3] basepython = python3 +[testenv:pyflakes] +basepython = python3 +commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/} + # https://github.com/gabrielfalcao/HTTPretty/issues/223 setenv = LC_ALL = en_US.utf-8 -- cgit v1.2.3 From 8cb7c3f7b5339e686bfbf95996b51afafeaf9c9e Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Thu, 3 Mar 2016 16:20:10 -0600 Subject: Update pep8 runner and fix pep8 issues --- Makefile | 9 ++-- bin/cloud-init | 43 +++++++++--------- cloudinit/config/cc_apt_configure.py | 6 ++- cloudinit/config/cc_disk_setup.py | 31 +++++++------ cloudinit/config/cc_grub_dpkg.py | 8 ++-- 
cloudinit/config/cc_keys_to_console.py | 2 +- cloudinit/config/cc_lxd.py | 2 +- cloudinit/config/cc_mounts.py | 12 ++--- cloudinit/config/cc_power_state_change.py | 2 +- cloudinit/config/cc_puppet.py | 6 +-- cloudinit/config/cc_resizefs.py | 2 +- cloudinit/config/cc_rh_subscription.py | 4 +- cloudinit/config/cc_set_hostname.py | 2 +- cloudinit/config/cc_ssh.py | 7 +-- cloudinit/config/cc_update_etc_hosts.py | 6 +-- cloudinit/config/cc_update_hostname.py | 2 +- cloudinit/config/cc_yum_add_repo.py | 2 +- cloudinit/distros/__init__.py | 12 ++--- cloudinit/distros/arch.py | 6 +-- cloudinit/distros/debian.py | 5 ++- cloudinit/distros/freebsd.py | 4 +- cloudinit/distros/gentoo.py | 4 +- cloudinit/distros/parsers/hostname.py | 2 +- cloudinit/distros/parsers/resolv_conf.py | 2 +- cloudinit/distros/parsers/sys_conf.py | 7 ++- cloudinit/filters/launch_index.py | 2 +- cloudinit/helpers.py | 7 +-- cloudinit/sources/DataSourceAzure.py | 21 +++++---- cloudinit/sources/DataSourceConfigDrive.py | 2 +- cloudinit/sources/DataSourceEc2.py | 10 ++--- cloudinit/sources/DataSourceMAAS.py | 15 ++++--- cloudinit/sources/DataSourceOVF.py | 4 +- cloudinit/sources/DataSourceOpenNebula.py | 3 +- cloudinit/sources/DataSourceSmartOS.py | 7 ++- cloudinit/ssh_util.py | 3 +- cloudinit/stages.py | 18 ++++---- cloudinit/url_helper.py | 6 +-- cloudinit/util.py | 15 ++++--- tests/unittests/test_data.py | 5 ++- tests/unittests/test_datasource/test_altcloud.py | 23 +++++----- tests/unittests/test_datasource/test_azure.py | 15 ++++--- .../unittests/test_datasource/test_configdrive.py | 12 ++--- tests/unittests/test_datasource/test_maas.py | 16 +++---- tests/unittests/test_datasource/test_smartos.py | 6 +-- .../test_handler/test_handler_power_state.py | 3 +- .../test_handler/test_handler_seed_random.py | 3 +- .../unittests/test_handler/test_handler_snappy.py | 3 +- tests/unittests/test_sshutil.py | 3 +- tests/unittests/test_templating.py | 3 +- tools/hacking.py | 16 +++---- tools/mock-meta.py | 27 +++++++----- tools/run-pep8 | 51 ++++++++-------------- 52 files changed, 244 insertions(+), 243 deletions(-) (limited to 'cloudinit/sources') diff --git a/Makefile b/Makefile index 058ac199..fb65b70b 100644 --- a/Makefile +++ b/Makefile @@ -20,13 +20,14 @@ all: test check_version check: pep8 pyflakes pyflakes3 unittest pep8: - @$(CWD)/tools/run-pep8 $(PY_FILES) + @$(CWD)/tools/run-pep8 pyflakes: - @$(CWD)/tools/tox-venv py27 pyflakes $(PY_FILES) + @$(CWD)/tools/run-pyflakes -pyflakes: - @$(CWD)/tools/tox-venv py34 pyflakes $(PY_FILES) +pyflakes3: + @$(CWD)/tools/run-pyflakes3 + unittest: nosetests $(noseopts) tests/unittests diff --git a/bin/cloud-init b/bin/cloud-init index 9b90c45e..7f665e7e 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -194,7 +194,7 @@ def main_init(name, args): if args.debug: # Reset so that all the debug handlers are closed out LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + " longer be active shortly")) logging.resetLogging() logging.setupLogging(init.cfg) apply_reporting_cfg(init.cfg) @@ -276,9 +276,9 @@ def main_init(name, args): # This may run user-data handlers and/or perform # url downloads and such as needed. 
(ran, _results) = init.cloudify().run('consume_data', - init.consume_data, - args=[PER_INSTANCE], - freq=PER_INSTANCE) + init.consume_data, + args=[PER_INSTANCE], + freq=PER_INSTANCE) if not ran: # Just consume anything that is set to run per-always # if nothing ran in the per-instance code @@ -349,7 +349,7 @@ def main_modules(action_name, args): if args.debug: # Reset so that all the debug handlers are closed out LOG.debug(("Logging being reset, this logger may no" - " longer be active shortly")) + " longer be active shortly")) logging.resetLogging() logging.setupLogging(mods.cfg) apply_reporting_cfg(init.cfg) @@ -534,7 +534,8 @@ def status_wrapper(name, args, data_d=None, link_d=None): errors.extend(v1[m].get('errors', [])) atomic_write_json(result_path, - {'v1': {'datasource': v1['datasource'], 'errors': errors}}) + {'v1': {'datasource': v1['datasource'], + 'errors': errors}}) util.sym_link(os.path.relpath(result_path, link_d), result_link, force=True) @@ -578,13 +579,13 @@ def main(): # These settings are used for the 'config' and 'final' stages parser_mod = subparsers.add_parser('modules', - help=('activates modules ' - 'using a given configuration key')) + help=('activates modules using ' + 'a given configuration key')) parser_mod.add_argument("--mode", '-m', action='store', - help=("module configuration name " - "to use (default: %(default)s)"), - default='config', - choices=('init', 'config', 'final')) + help=("module configuration name " + "to use (default: %(default)s)"), + default='config', + choices=('init', 'config', 'final')) parser_mod.set_defaults(action=('modules', main_modules)) # These settings are used when you want to query information @@ -600,22 +601,22 @@ def main(): # This subcommand allows you to run a single module parser_single = subparsers.add_parser('single', - help=('run a single module ')) + help=('run a single module ')) parser_single.set_defaults(action=('single', main_single)) parser_single.add_argument("--name", '-n', action="store", - help="module name to run", - required=True) + help="module name to run", + required=True) parser_single.add_argument("--frequency", action="store", - help=("frequency of the module"), - required=False, - choices=list(FREQ_SHORT_NAMES.keys())) + help=("frequency of the module"), + required=False, + choices=list(FREQ_SHORT_NAMES.keys())) parser_single.add_argument("--report", action="store_true", help="enable reporting", required=False) parser_single.add_argument("module_args", nargs="*", - metavar='argument', - help=('any additional arguments to' - ' pass to this module')) + metavar='argument', + help=('any additional arguments to' + ' pass to this module')) parser_single.set_defaults(action=('single', main_single)) args = parser.parse_args() diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py index 9e9e9e26..702977cb 100644 --- a/cloudinit/config/cc_apt_configure.py +++ b/cloudinit/config/cc_apt_configure.py @@ -91,7 +91,8 @@ def handle(name, cfg, cloud, log, _args): if matchcfg: matcher = re.compile(matchcfg).search else: - matcher = lambda f: False + def matcher(x): + return False errors = add_sources(cfg['apt_sources'], params, aa_repo_match=matcher) @@ -173,7 +174,8 @@ def add_sources(srclist, template_params=None, aa_repo_match=None): template_params = {} if aa_repo_match is None: - aa_repo_match = lambda f: False + def aa_repo_match(x): + return False errorlist = [] for ent in srclist: diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py index 
d5b0d1d7..0ecc2e4c 100644 --- a/cloudinit/config/cc_disk_setup.py +++ b/cloudinit/config/cc_disk_setup.py @@ -167,11 +167,12 @@ def enumerate_disk(device, nodeps=False): parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0] for part in parts: - d = {'name': None, - 'type': None, - 'fstype': None, - 'label': None, - } + d = { + 'name': None, + 'type': None, + 'fstype': None, + 'label': None, + } for key, value in value_splitter(part): d[key.lower()] = value @@ -701,11 +702,12 @@ def lookup_force_flag(fs): """ A force flag might be -F or -F, this look it up """ - flags = {'ext': '-F', - 'btrfs': '-f', - 'xfs': '-f', - 'reiserfs': '-f', - } + flags = { + 'ext': '-F', + 'btrfs': '-f', + 'xfs': '-f', + 'reiserfs': '-f', + } if 'ext' in fs.lower(): fs = 'ext' @@ -824,10 +826,11 @@ def mkfs(fs_cfg): # Create the commands if fs_cmd: - fs_cmd = fs_cfg['cmd'] % {'label': label, - 'filesystem': fs_type, - 'device': device, - } + fs_cmd = fs_cfg['cmd'] % { + 'label': label, + 'filesystem': fs_type, + 'device': device, + } else: # Find the mkfs command mkfs_cmd = util.which("mkfs.%s" % fs_type) diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py index 456597af..acd3e60a 100644 --- a/cloudinit/config/cc_grub_dpkg.py +++ b/cloudinit/config/cc_grub_dpkg.py @@ -38,11 +38,11 @@ def handle(name, cfg, _cloud, log, _args): idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None) idevs_empty = util.get_cfg_option_str(mycfg, - "grub-pc/install_devices_empty", None) + "grub-pc/install_devices_empty", + None) if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or - (os.path.exists("/dev/xvda1") - and not os.path.exists("/dev/xvda"))): + (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))): if idevs is None: idevs = "" if idevs_empty is None: @@ -66,7 +66,7 @@ def handle(name, cfg, _cloud, log, _args): (idevs, idevs_empty)) log.debug("Setting grub debconf-set-selections with '%s','%s'" % - (idevs, idevs_empty)) + (idevs, idevs_empty)) try: util.subp(['debconf-set-selections'], dconf_sel) diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index f1c1adff..aa844ee9 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -48,7 +48,7 @@ def handle(name, cfg, cloud, log, _args): "ssh_fp_console_blacklist", []) key_blacklist = util.get_cfg_option_list(cfg, "ssh_key_console_blacklist", - ["ssh-dss"]) + ["ssh-dss"]) try: cmd = [helper_path] diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py index 7d8a0202..e2fdf68e 100644 --- a/cloudinit/config/cc_lxd.py +++ b/cloudinit/config/cc_lxd.py @@ -59,7 +59,7 @@ def handle(name, cfg, cloud, log, args): if init_cfg: if not isinstance(init_cfg, dict): log.warn("lxd/init config must be a dictionary. 
found a '%s'", - type(init_cfg)) + type(init_cfg)) return cmd = ['lxd', 'init', '--auto'] for k in init_keys: diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py index 11089d8d..4fe3ee21 100644 --- a/cloudinit/config/cc_mounts.py +++ b/cloudinit/config/cc_mounts.py @@ -204,12 +204,12 @@ def setup_swapfile(fname, size=None, maxsize=None): try: util.ensure_dir(tdir) util.log_time(LOG.debug, msg, func=util.subp, - args=[['sh', '-c', - ('rm -f "$1" && umask 0066 && ' - '{ fallocate -l "${2}M" "$1" || ' - ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' - 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), - 'setup_swap', fname, mbsize]]) + args=[['sh', '-c', + ('rm -f "$1" && umask 0066 && ' + '{ fallocate -l "${2}M" "$1" || ' + ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && ' + 'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'), + 'setup_swap', fname, mbsize]]) except Exception as e: raise IOError("Failed %s: %s" % (msg, e)) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index 7d9567e3..cc3f7f70 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -105,7 +105,7 @@ def handle(_name, cfg, _cloud, log, _args): log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args))) - util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, + util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, condition, execmd, [args, devnull_fp]) diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py index 4501598e..774d3322 100644 --- a/cloudinit/config/cc_puppet.py +++ b/cloudinit/config/cc_puppet.py @@ -36,8 +36,8 @@ def _autostart_puppet(log): # Set puppet to automatically start if os.path.exists('/etc/default/puppet'): util.subp(['sed', '-i', - '-e', 's/^START=.*/START=yes/', - '/etc/default/puppet'], capture=False) + '-e', 's/^START=.*/START=yes/', + '/etc/default/puppet'], capture=False) elif os.path.exists('/bin/systemctl'): util.subp(['/bin/systemctl', 'enable', 'puppet.service'], capture=False) @@ -65,7 +65,7 @@ def handle(name, cfg, cloud, log, _args): " doing nothing.")) elif install: log.debug(("Attempting to install puppet %s,"), - version if version else 'latest') + version if version else 'latest') cloud.distro.install_packages(('puppet', version)) # ... and then update the puppet configuration diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py index cbc07853..2a2a9f59 100644 --- a/cloudinit/config/cc_resizefs.py +++ b/cloudinit/config/cc_resizefs.py @@ -166,7 +166,7 @@ def handle(name, cfg, _cloud, log, args): func=do_resize, args=(resize_cmd, log)) else: util.log_time(logfunc=log.debug, msg="Resizing", - func=do_resize, args=(resize_cmd, log)) + func=do_resize, args=(resize_cmd, log)) action = 'Resized' if resize_root == NOBLOCK: diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py index 3b30c47e..6f474aed 100644 --- a/cloudinit/config/cc_rh_subscription.py +++ b/cloudinit/config/cc_rh_subscription.py @@ -127,8 +127,8 @@ class SubscriptionManager(object): return False, not_bool if (self.servicelevel is not None) and \ - ((not self.auto_attach) - or (util.is_false(str(self.auto_attach)))): + ((not self.auto_attach) or + (util.is_false(str(self.auto_attach)))): no_auto = ("The service-level key must be used in conjunction " "with the auto-attach key. 
Please re-run with " diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index 5d7f4331..f43d8d5a 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -24,7 +24,7 @@ from cloudinit import util def handle(name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, "preserve_hostname", False): log.debug(("Configuration option 'preserve_hostname' is set," - " not setting the hostname in module %s"), name) + " not setting the hostname in module %s"), name) return (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py index 5bd2dec6..d24e43c0 100644 --- a/cloudinit/config/cc_ssh.py +++ b/cloudinit/config/cc_ssh.py @@ -30,9 +30,10 @@ from cloudinit import distros as ds from cloudinit import ssh_util from cloudinit import util -DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding," -"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " -"rather than the user \\\"root\\\".\';echo;sleep 10\"") +DISABLE_ROOT_OPTS = ( + "no-port-forwarding,no-agent-forwarding," + "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\"" + " rather than the user \\\"root\\\".\';echo;sleep 10\"") GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519'] KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key' diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py index d3dd1f32..15703efe 100644 --- a/cloudinit/config/cc_update_etc_hosts.py +++ b/cloudinit/config/cc_update_etc_hosts.py @@ -41,10 +41,10 @@ def handle(name, cfg, cloud, log, _args): if not tpl_fn_name: raise RuntimeError(("No hosts template could be" " found for distro %s") % - (cloud.distro.osfamily)) + (cloud.distro.osfamily)) templater.render_to_file(tpl_fn_name, '/etc/hosts', - {'hostname': hostname, 'fqdn': fqdn}) + {'hostname': hostname, 'fqdn': fqdn}) elif manage_hosts == "localhost": (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) @@ -57,4 +57,4 @@ def handle(name, cfg, cloud, log, _args): cloud.distro.update_etc_hosts(hostname, fqdn) else: log.debug(("Configuration option 'manage_etc_hosts' is not set," - " not managing /etc/hosts in module %s"), name) + " not managing /etc/hosts in module %s"), name) diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py index e396ba13..5b78afe1 100644 --- a/cloudinit/config/cc_update_hostname.py +++ b/cloudinit/config/cc_update_hostname.py @@ -29,7 +29,7 @@ frequency = PER_ALWAYS def handle(name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, "preserve_hostname", False): log.debug(("Configuration option 'preserve_hostname' is set," - " not updating the hostname in module %s"), name) + " not updating the hostname in module %s"), name) return (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py index 3b821af9..64fba869 100644 --- a/cloudinit/config/cc_yum_add_repo.py +++ b/cloudinit/config/cc_yum_add_repo.py @@ -92,7 +92,7 @@ def handle(name, cfg, _cloud, log, _args): for req_field in ['baseurl']: if req_field not in repo_config: log.warn(("Repository %s does not contain a %s" - " configuration 'required' entry"), + " configuration 'required' entry"), repo_id, req_field) missing_required += 1 if not missing_required: diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 71884b32..661a9fd2 100644 --- a/cloudinit/distros/__init__.py 
+++ b/cloudinit/distros/__init__.py @@ -211,8 +211,8 @@ class Distro(object): # If the system hostname is different than the previous # one or the desired one lets update it as well - if (not sys_hostname) or (sys_hostname == prev_hostname - and sys_hostname != hostname): + if ((not sys_hostname) or (sys_hostname == prev_hostname and + sys_hostname != hostname)): update_files.append(sys_fn) # If something else has changed the hostname after we set it @@ -221,7 +221,7 @@ class Distro(object): if (sys_hostname and prev_hostname and sys_hostname != prev_hostname): LOG.info("%s differs from %s, assuming user maintained hostname.", - prev_hostname_fn, sys_fn) + prev_hostname_fn, sys_fn) return # Remove duplicates (incase the previous config filename) @@ -289,7 +289,7 @@ class Distro(object): def _bring_up_interface(self, device_name): cmd = ['ifup', device_name] LOG.debug("Attempting to run bring up interface %s using command %s", - device_name, cmd) + device_name, cmd) try: (_out, err) = util.subp(cmd) if len(err): @@ -548,7 +548,7 @@ class Distro(object): for member in members: if not util.is_user(member): LOG.warn("Unable to add group member '%s' to group '%s'" - "; user does not exist.", member, name) + "; user does not exist.", member, name) continue util.subp(['usermod', '-a', '-G', name, member]) @@ -886,7 +886,7 @@ def fetch(name): locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro']) if not locs: raise ImportError("No distribution found for distro %s (searched %s)" - % (name, looked_locs)) + % (name, looked_locs)) mod = importer.import_module(locs[0]) cls = getattr(mod, 'Distro') return cls diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index 45fcf26f..93a2e008 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -74,7 +74,7 @@ class Distro(distros.Distro): 'Interface': dev, 'IP': info.get('bootproto'), 'Address': "('%s/%s')" % (info.get('address'), - info.get('netmask')), + info.get('netmask')), 'Gateway': info.get('gateway'), 'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '') } @@ -86,7 +86,7 @@ class Distro(distros.Distro): if nameservers: util.write_file(self.resolve_conf_fn, - convert_resolv_conf(nameservers)) + convert_resolv_conf(nameservers)) return dev_names @@ -102,7 +102,7 @@ class Distro(distros.Distro): def _bring_up_interface(self, device_name): cmd = ['netctl', 'restart', device_name] LOG.debug("Attempting to run bring up interface %s using command %s", - device_name, cmd) + device_name, cmd) try: (_out, err) = util.subp(cmd) if len(err): diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 6d3a82bf..db5890b1 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -159,8 +159,9 @@ class Distro(distros.Distro): # Allow the output of this to flow outwards (ie not be captured) util.log_time(logfunc=LOG.debug, - msg="apt-%s [%s]" % (command, ' '.join(cmd)), func=util.subp, - args=(cmd,), kwargs={'env': e, 'capture': False}) + msg="apt-%s [%s]" % (command, ' '.join(cmd)), + func=util.subp, + args=(cmd,), kwargs={'env': e, 'capture': False}) def update_package_sources(self): self._runner.run("update-sources", self.package_command, diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index 4c484639..72012056 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -205,8 +205,8 @@ class Distro(distros.Distro): redact_opts = ['passwd'] for key, val in kwargs.items(): - if (key in adduser_opts and val - and 
isinstance(val, six.string_types)): + if (key in adduser_opts and val and + isinstance(val, six.string_types)): adduser_cmd.extend([adduser_opts[key], val]) # Redact certain fields from the logs diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py index 9e80583c..6267dd6e 100644 --- a/cloudinit/distros/gentoo.py +++ b/cloudinit/distros/gentoo.py @@ -66,7 +66,7 @@ class Distro(distros.Distro): def _bring_up_interface(self, device_name): cmd = ['/etc/init.d/net.%s' % device_name, 'restart'] LOG.debug("Attempting to run bring up interface %s using command %s", - device_name, cmd) + device_name, cmd) try: (_out, err) = util.subp(cmd) if len(err): @@ -88,7 +88,7 @@ class Distro(distros.Distro): (_out, err) = util.subp(cmd) if len(err): LOG.warn("Running %s resulted in stderr output: %s", cmd, - err) + err) except util.ProcessExecutionError: util.logexc(LOG, "Running interface command %s failed", cmd) return False diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py index 84a1de42..efb185d4 100644 --- a/cloudinit/distros/parsers/hostname.py +++ b/cloudinit/distros/parsers/hostname.py @@ -84,5 +84,5 @@ class HostnameConf(object): hostnames_found.add(head) if len(hostnames_found) > 1: raise IOError("Multiple hostnames (%s) found!" - % (hostnames_found)) + % (hostnames_found)) return entries diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py index 8aee03a4..2ed13d9c 100644 --- a/cloudinit/distros/parsers/resolv_conf.py +++ b/cloudinit/distros/parsers/resolv_conf.py @@ -132,7 +132,7 @@ class ResolvConf(object): # Some hard limit on 256 chars total raise ValueError(("Adding %r would go beyond the " "256 maximum search list character limit") - % (search_domain)) + % (search_domain)) self._remove_option('search') self._contents.append(('option', ['search', s_list, ''])) return flat_sds diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py index d795e12f..6157cf32 100644 --- a/cloudinit/distros/parsers/sys_conf.py +++ b/cloudinit/distros/parsers/sys_conf.py @@ -77,8 +77,7 @@ class SysConf(configobj.ConfigObj): quot_func = None if value[0] in ['"', "'"] and value[-1] in ['"', "'"]: if len(value) == 1: - quot_func = (lambda x: - self._get_single_quote(x) % x) + quot_func = (lambda x: self._get_single_quote(x) % x) else: # Quote whitespace if it isn't the start + end of a shell command if value.strip().startswith("$(") and value.strip().endswith(")"): @@ -91,10 +90,10 @@ class SysConf(configobj.ConfigObj): # to use single quotes which won't get expanded... 
if re.search(r"[\n\"']", value): quot_func = (lambda x: - self._get_triple_quote(x) % x) + self._get_triple_quote(x) % x) else: quot_func = (lambda x: - self._get_single_quote(x) % x) + self._get_single_quote(x) % x) else: quot_func = pipes.quote if not quot_func: diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py index 5bebd318..baecdac9 100644 --- a/cloudinit/filters/launch_index.py +++ b/cloudinit/filters/launch_index.py @@ -61,7 +61,7 @@ class Filter(object): discarded += 1 LOG.debug(("Discarding %s multipart messages " "which do not match launch index %s"), - discarded, self.wanted_idx) + discarded, self.wanted_idx) new_message = copy.copy(message) new_message.set_payload(new_msgs) new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs)) diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py index 5e99d185..a6eb20fe 100644 --- a/cloudinit/helpers.py +++ b/cloudinit/helpers.py @@ -139,9 +139,10 @@ class FileSemaphores(object): # but the item had run before we did canon_sem_name. if cname != name and os.path.exists(self._get_path(name, freq)): LOG.warn("%s has run without canonicalized name [%s].\n" - "likely the migrator has not yet run. It will run next boot.\n" - "run manually with: cloud-init single --name=migrator" - % (name, cname)) + "likely the migrator has not yet run. " + "It will run next boot.\n" + "run manually with: cloud-init single --name=migrator" + % (name, cname)) return True return False diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index bd80a8a6..b03ab895 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -38,7 +38,8 @@ LOG = logging.getLogger(__name__) DS_NAME = 'Azure' DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"} AGENT_START = ['service', 'walinuxagent', 'start'] -BOUNCE_COMMAND = ['sh', '-xc', +BOUNCE_COMMAND = [ + 'sh', '-xc', "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"] BUILTIN_DS_CONFIG = { @@ -91,9 +92,9 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): """ policy = cfg['hostname_bounce']['policy'] previous_hostname = get_hostname(hostname_command) - if (not util.is_true(cfg.get('set_hostname')) - or util.is_false(policy) - or (previous_hostname == temp_hostname and policy != 'force')): + if (not util.is_true(cfg.get('set_hostname')) or + util.is_false(policy) or + (previous_hostname == temp_hostname and policy != 'force')): yield None return set_hostname(temp_hostname, hostname_command) @@ -123,8 +124,8 @@ class DataSourceAzureNet(sources.DataSource): with temporary_hostname(temp_hostname, self.ds_cfg, hostname_command=hostname_command) \ as previous_hostname: - if (previous_hostname is not None - and util.is_true(self.ds_cfg.get('set_hostname'))): + if (previous_hostname is not None and + util.is_true(self.ds_cfg.get('set_hostname'))): cfg = self.ds_cfg['hostname_bounce'] try: perform_hostname_bounce(hostname=temp_hostname, @@ -152,7 +153,8 @@ class DataSourceAzureNet(sources.DataSource): else: bname = str(pk['fingerprint'] + ".crt") fp_files += [os.path.join(ddir, bname)] - LOG.debug("ssh authentication: using fingerprint from fabirc") + LOG.debug("ssh authentication: " + "using fingerprint from fabirc") missing = util.log_time(logfunc=LOG.debug, msg="waiting for files", func=wait_for_files, @@ -506,7 +508,7 @@ def read_azure_ovf(contents): raise BrokenAzureDataSource("invalid xml: %s" % e) results = find_child(dom.documentElement, - lambda n: n.localName == "ProvisioningSection") + 
lambda n: n.localName == "ProvisioningSection") if len(results) == 0: raise NonAzureDataSource("No ProvisioningSection") @@ -516,7 +518,8 @@ def read_azure_ovf(contents): provSection = results[0] lpcs_nodes = find_child(provSection, - lambda n: n.localName == "LinuxProvisioningConfigurationSet") + lambda n: + n.localName == "LinuxProvisioningConfigurationSet") if len(results) == 0: raise NonAzureDataSource("No LinuxProvisioningConfigurationSet") diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index eb474079..e3916208 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -39,7 +39,7 @@ FS_TYPES = ('vfat', 'iso9660') LABEL_TYPES = ('config-2',) POSSIBLE_MOUNTS = ('sr', 'cd') OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS - for i in range(0, 2))) + for i in range(0, 2))) class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 0032d06c..6a897f7d 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -61,12 +61,12 @@ class DataSourceEc2(sources.DataSource): if not self.wait_for_metadata_service(): return False start_time = time.time() - self.userdata_raw = ec2.get_instance_userdata(self.api_ver, - self.metadata_address) + self.userdata_raw = \ + ec2.get_instance_userdata(self.api_ver, self.metadata_address) self.metadata = ec2.get_instance_metadata(self.api_ver, self.metadata_address) LOG.debug("Crawl of metadata service took %s seconds", - int(time.time() - start_time)) + int(time.time() - start_time)) return True except Exception: util.logexc(LOG, "Failed reading from metadata address %s", @@ -132,13 +132,13 @@ class DataSourceEc2(sources.DataSource): start_time = time.time() url = uhelp.wait_for_url(urls=urls, max_wait=max_wait, - timeout=timeout, status_cb=LOG.warn) + timeout=timeout, status_cb=LOG.warn) if url: LOG.debug("Using metadata source: '%s'", url2base[url]) else: LOG.critical("Giving up on md from %s after %s seconds", - urls, int(time.time() - start_time)) + urls, int(time.time() - start_time)) self.metadata_address = url2base.get(url) return bool(url) diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py index cfc59ca5..f18c4cee 100644 --- a/cloudinit/sources/DataSourceMAAS.py +++ b/cloudinit/sources/DataSourceMAAS.py @@ -275,17 +275,18 @@ if __name__ == "__main__": parser = argparse.ArgumentParser(description='Interact with MAAS DS') parser.add_argument("--config", metavar="file", - help="specify DS config file", default=None) + help="specify DS config file", default=None) parser.add_argument("--ckey", metavar="key", - help="the consumer key to auth with", default=None) + help="the consumer key to auth with", default=None) parser.add_argument("--tkey", metavar="key", - help="the token key to auth with", default=None) + help="the token key to auth with", default=None) parser.add_argument("--csec", metavar="secret", - help="the consumer secret (likely '')", default="") + help="the consumer secret (likely '')", default="") parser.add_argument("--tsec", metavar="secret", - help="the token secret to auth with", default=None) + help="the token secret to auth with", default=None) parser.add_argument("--apiver", metavar="version", - help="the apiver to use ("" can be used)", default=MD_VERSION) + help="the apiver to use ("" can be used)", + default=MD_VERSION) subcmds = 
parser.add_subparsers(title="subcommands", dest="subcmd") subcmds.add_parser('crawl', help="crawl the datasource") @@ -297,7 +298,7 @@ if __name__ == "__main__": args = parser.parse_args() creds = {'consumer_key': args.ckey, 'token_key': args.tkey, - 'token_secret': args.tsec, 'consumer_secret': args.csec} + 'token_secret': args.tsec, 'consumer_secret': args.csec} if args.config: cfg = util.read_conf(args.config) diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 58a4b2a2..adf9b12e 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -264,14 +264,14 @@ def get_properties(contents): # could also check here that elem.namespaceURI == # "http://schemas.dmtf.org/ovf/environment/1" propSections = find_child(dom.documentElement, - lambda n: n.localName == "PropertySection") + lambda n: n.localName == "PropertySection") if len(propSections) == 0: raise XmlError("No 'PropertySection's") props = {} propElems = find_child(propSections[0], - (lambda n: n.localName == "Property")) + (lambda n: n.localName == "Property")) for elem in propElems: key = elem.attributes.getNamedItemNS(envNsURI, "key").value diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index ac2c3b45..b26940d1 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -404,7 +404,8 @@ def read_context_disk_dir(source_dir, asuser=None): if ssh_key_var: lines = context.get(ssh_key_var).splitlines() results['metadata']['public-keys'] = [l for l in lines - if len(l) and not l.startswith("#")] + if len(l) and not + l.startswith("#")] # custom hostname -- try hostname or leave cloud-init # itself create hostname from IP address later diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 7453379a..139ee52c 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -90,8 +90,7 @@ BUILTIN_DS_CONFIG = { 'user-data', 'user-script', 'sdc:datacenter_name', - 'sdc:uuid', - ], + 'sdc:uuid'], 'base64_keys': [], 'base64_all': False, 'disk_aliases': {'ephemeral0': '/dev/vdb'}, @@ -450,7 +449,7 @@ class JoyentMetadataClient(object): response = bytearray() response.extend(self.metasource.read(1)) - while response[-1:] != b'\n': + while response[-1:] != b'\n': response.extend(self.metasource.read(1)) response = response.rstrip().decode('ascii') LOG.debug('Read "%s" from metadata transport.', response) @@ -513,7 +512,7 @@ def write_boot_content(content, content_f, link=None, shebang=False, except Exception as e: util.logexc(LOG, ("Failed to identify script type for %s" % - content_f, e)) + content_f, e)) if link: try: diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py index 9b2f5ed5..c74a7ae2 100644 --- a/cloudinit/ssh_util.py +++ b/cloudinit/ssh_util.py @@ -31,7 +31,8 @@ LOG = logging.getLogger(__name__) DEF_SSHD_CFG = "/etc/ssh/sshd_config" # taken from openssh source key.c/key_type_from_name -VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", +VALID_KEY_TYPES = ( + "rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa", "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com", "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com", "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com", diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 9f192c8d..dbcf3d55 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -509,13 +509,13 @@ class Init(object): def 
consume_data(self, frequency=PER_INSTANCE): # Consume the userdata first, because we need want to let the part # handlers run first (for merging stuff) - with events.ReportEventStack( - "consume-user-data", "reading and applying user-data", - parent=self.reporter): + with events.ReportEventStack("consume-user-data", + "reading and applying user-data", + parent=self.reporter): self._consume_userdata(frequency) - with events.ReportEventStack( - "consume-vendor-data", "reading and applying vendor-data", - parent=self.reporter): + with events.ReportEventStack("consume-vendor-data", + "reading and applying vendor-data", + parent=self.reporter): self._consume_vendordata(frequency) # Perform post-consumption adjustments so that @@ -655,7 +655,7 @@ class Modules(object): else: raise TypeError(("Failed to read '%s' item in config," " unknown type %s") % - (item, type_utils.obj_name(item))) + (item, type_utils.obj_name(item))) return module_list def _fixup_modules(self, raw_mods): @@ -762,8 +762,8 @@ class Modules(object): if skipped: LOG.info("Skipping modules %s because they are not verified " - "on distro '%s'. To run anyway, add them to " - "'unverified_modules' in config.", skipped, d_name) + "on distro '%s'. To run anyway, add them to " + "'unverified_modules' in config.", skipped, d_name) if forced: LOG.info("running unverified_modules: %s", forced) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index f2e1390e..936f7da5 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -252,9 +252,9 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # attrs return UrlResponse(r) except exceptions.RequestException as e: - if (isinstance(e, (exceptions.HTTPError)) - and hasattr(e, 'response') # This appeared in v 0.10.8 - and hasattr(e.response, 'status_code')): + if (isinstance(e, (exceptions.HTTPError)) and + hasattr(e, 'response') and # This appeared in v 0.10.8 + hasattr(e.response, 'status_code')): excps.append(UrlError(e, code=e.response.status_code, headers=e.response.headers, url=url)) diff --git a/cloudinit/util.py b/cloudinit/util.py index 45d49e66..de37b0f5 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -612,7 +612,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): def make_url(scheme, host, port=None, - path='', params='', query='', fragment=''): + path='', params='', query='', fragment=''): pieces = [] pieces.append(scheme or '') @@ -804,8 +804,8 @@ def load_yaml(blob, default=None, allowed=(dict,)): blob = decode_binary(blob) try: LOG.debug("Attempting to load yaml from string " - "of length %s with allowed root types %s", - len(blob), allowed) + "of length %s with allowed root types %s", + len(blob), allowed) converted = safeyaml.load(blob) if not isinstance(converted, allowed): # Yes this will just be caught, but thats ok for now... 
@@ -878,7 +878,7 @@ def read_conf_with_confd(cfgfile): if not isinstance(confd, six.string_types): raise TypeError(("Config file %s contains 'conf_d' " "with non-string type %s") % - (cfgfile, type_utils.obj_name(confd))) + (cfgfile, type_utils.obj_name(confd))) else: confd = str(confd).strip() elif os.path.isdir("%s.d" % cfgfile): @@ -1041,7 +1041,8 @@ def is_resolvable(name): for iname in badnames: try: result = socket.getaddrinfo(iname, None, 0, 0, - socket.SOCK_STREAM, socket.AI_CANONNAME) + socket.SOCK_STREAM, + socket.AI_CANONNAME) badresults[iname] = [] for (_fam, _stype, _proto, cname, sockaddr) in result: badresults[iname].append("%s: %s" % (cname, sockaddr[0])) @@ -1109,7 +1110,7 @@ def close_stdin(): def find_devs_with(criteria=None, oformat='device', - tag=None, no_cache=False, path=None): + tag=None, no_cache=False, path=None): """ find devices matching given criteria (via blkid) criteria can be *one* of: @@ -1628,7 +1629,7 @@ def write_file(filename, content, mode=0o644, omode="wb"): content = decode_binary(content) write_type = 'characters' LOG.debug("Writing to %s - %s: [%s] %s %s", - filename, omode, mode, len(content), write_type) + filename, omode, mode, len(content), write_type) with SeLinuxGuard(path=filename): with open(filename, omode) as fh: fh.write(content) diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index c603bfdb..9c1ec1d4 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -27,11 +27,12 @@ from cloudinit import stages from cloudinit import user_data as ud from cloudinit import util -INSTANCE_ID = "i-testing" - from . import helpers +INSTANCE_ID = "i-testing" + + class FakeDataSource(sources.DataSource): def __init__(self, userdata=None, vendordata=None): diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py index e9cd2fa5..85759c68 100644 --- a/tests/unittests/test_datasource/test_altcloud.py +++ b/tests/unittests/test_datasource/test_altcloud.py @@ -134,8 +134,7 @@ class TestGetCloudType(TestCase): ''' util.read_dmi_data = _dmi_data('RHEV') dsrc = DataSourceAltCloud({}, None, self.paths) - self.assertEquals('RHEV', \ - dsrc.get_cloud_type()) + self.assertEquals('RHEV', dsrc.get_cloud_type()) def test_vsphere(self): ''' @@ -144,8 +143,7 @@ class TestGetCloudType(TestCase): ''' util.read_dmi_data = _dmi_data('VMware Virtual Platform') dsrc = DataSourceAltCloud({}, None, self.paths) - self.assertEquals('VSPHERE', \ - dsrc.get_cloud_type()) + self.assertEquals('VSPHERE', dsrc.get_cloud_type()) def test_unknown(self): ''' @@ -154,8 +152,7 @@ class TestGetCloudType(TestCase): ''' util.read_dmi_data = _dmi_data('Unrecognized Platform') dsrc = DataSourceAltCloud({}, None, self.paths) - self.assertEquals('UNKNOWN', \ - dsrc.get_cloud_type()) + self.assertEquals('UNKNOWN', dsrc.get_cloud_type()) class TestGetDataCloudInfoFile(TestCase): @@ -412,27 +409,27 @@ class TestReadUserDataCallback(TestCase): '''Test read_user_data_callback() with both files.''' self.assertEquals('test user data', - read_user_data_callback(self.mount_dir)) + read_user_data_callback(self.mount_dir)) def test_callback_dc(self): '''Test read_user_data_callback() with only DC file.''' _remove_user_data_files(self.mount_dir, - dc_file=False, - non_dc_file=True) + dc_file=False, + non_dc_file=True) self.assertEquals('test user data', - read_user_data_callback(self.mount_dir)) + read_user_data_callback(self.mount_dir)) def test_callback_non_dc(self): '''Test read_user_data_callback() with 
only non-DC file.''' _remove_user_data_files(self.mount_dir, - dc_file=True, - non_dc_file=False) + dc_file=True, + non_dc_file=False) self.assertEquals('test user data', - read_user_data_callback(self.mount_dir)) + read_user_data_callback(self.mount_dir)) def test_callback_none(self): '''Test read_user_data_callback() no files are found.''' diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 3933794f..4c9c7d8b 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -207,7 +207,7 @@ class TestAzureDataSource(TestCase): yaml_cfg = "{agent_command: my_command}\n" cfg = yaml.safe_load(yaml_cfg) odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} + 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) @@ -219,8 +219,8 @@ class TestAzureDataSource(TestCase): # set dscfg in via base64 encoded yaml cfg = {'agent_command': "my_command"} odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': b64e(yaml.dump(cfg)), - 'encoding': 'base64'}} + 'dscfg': {'text': b64e(yaml.dump(cfg)), + 'encoding': 'base64'}} data = {'ovfcontent': construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) @@ -267,7 +267,8 @@ class TestAzureDataSource(TestCase): # should equal that after the '$' pos = defuser['passwd'].rfind("$") + 1 self.assertEqual(defuser['passwd'], - crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos])) + crypt.crypt(odata['UserPassword'], + defuser['passwd'][0:pos])) def test_userdata_plain(self): mydata = "FOOBAR" @@ -364,8 +365,8 @@ class TestAzureDataSource(TestCase): # Make sure that user can affect disk aliases dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}} odata = {'HostName': "myhost", 'UserName': "myuser", - 'dscfg': {'text': b64e(yaml.dump(dscfg)), - 'encoding': 'base64'}} + 'dscfg': {'text': b64e(yaml.dump(dscfg)), + 'encoding': 'base64'}} usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'}, 'ephemeral0': False}} userdata = '#cloud-config' + yaml.dump(usercfg) + "\n" @@ -634,7 +635,7 @@ class TestReadAzureOvf(TestCase): def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "" + construct_valid_ovf_env(data={}) self.assertRaises(DataSourceAzure.BrokenAzureDataSource, - DataSourceAzure.read_azure_ovf, invalid_xml) + DataSourceAzure.read_azure_ovf, invalid_xml) def test_load_with_pubkeys(self): mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}] diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 83aca505..3954ceb3 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -293,9 +293,8 @@ class TestConfigDriveDataSource(TestCase): util.is_partition = my_is_partition devs_with_answers = {"TYPE=vfat": [], - "TYPE=iso9660": ["/dev/vdb"], - "LABEL=config-2": ["/dev/vdb"], - } + "TYPE=iso9660": ["/dev/vdb"], + "LABEL=config-2": ["/dev/vdb"]} self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) # add a vfat item @@ -306,9 +305,10 @@ class TestConfigDriveDataSource(TestCase): # verify that partitions are considered, that have correct label. 
devs_with_answers = {"TYPE=vfat": ["/dev/sda1"], - "TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]} + "TYPE=iso9660": [], + "LABEL=config-2": ["/dev/vdb3"]} self.assertEqual(["/dev/vdb3"], - ds.find_candidate_devs()) + ds.find_candidate_devs()) finally: util.find_devs_with = orig_find_devs_with @@ -319,7 +319,7 @@ class TestConfigDriveDataSource(TestCase): populate_dir(self.tmp, CFG_DRIVE_FILES_V2) myds = cfg_ds_from_dir(self.tmp) self.assertEqual(myds.get_public_ssh_keys(), - [OSTACK_META['public_keys']['mykey']]) + [OSTACK_META['public_keys']['mykey']]) def cfg_ds_from_dir(seed_d): diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py index eb97b692..77d15cac 100644 --- a/tests/unittests/test_datasource/test_maas.py +++ b/tests/unittests/test_datasource/test_maas.py @@ -25,9 +25,9 @@ class TestMAASDataSource(TestCase): """Verify a valid seeddir is read as such.""" data = {'instance-id': 'i-valid01', - 'local-hostname': 'valid01-hostname', - 'user-data': b'valid01-userdata', - 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'} + 'local-hostname': 'valid01-hostname', + 'user-data': b'valid01-userdata', + 'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'} my_d = os.path.join(self.tmp, "valid") populate_dir(my_d, data) @@ -45,8 +45,8 @@ class TestMAASDataSource(TestCase): """Verify extra files do not affect seed_dir validity.""" data = {'instance-id': 'i-valid-extra', - 'local-hostname': 'valid-extra-hostname', - 'user-data': b'valid-extra-userdata', 'foo': 'bar'} + 'local-hostname': 'valid-extra-hostname', + 'user-data': b'valid-extra-userdata', 'foo': 'bar'} my_d = os.path.join(self.tmp, "valid_extra") populate_dir(my_d, data) @@ -64,7 +64,7 @@ class TestMAASDataSource(TestCase): """Verify that invalid seed_dir raises MAASSeedDirMalformed.""" valid = {'instance-id': 'i-instanceid', - 'local-hostname': 'test-hostname', 'user-data': ''} + 'local-hostname': 'test-hostname', 'user-data': ''} my_based = os.path.join(self.tmp, "valid_extra") @@ -94,8 +94,8 @@ class TestMAASDataSource(TestCase): def test_seed_dir_missing(self): """Verify that missing seed_dir raises MAASSeedDirNone.""" self.assertRaises(DataSourceMAAS.MAASSeedDirNone, - DataSourceMAAS.read_maas_seed_dir, - os.path.join(self.tmp, "nonexistantdirectory")) + DataSourceMAAS.read_maas_seed_dir, + os.path.join(self.tmp, "nonexistantdirectory")) def test_seed_url_valid(self): """Verify that valid seed_url is read as such.""" diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 1235436d..5e617b83 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -463,8 +463,8 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase): payloadstr = ' {0}'.format(self.response_parts['payload']) return ('V2 {length} {crc} {request_id} ' '{command}{payloadstr}\n'.format( - payloadstr=payloadstr, - **self.response_parts).encode('ascii')) + payloadstr=payloadstr, + **self.response_parts).encode('ascii')) self.metasource_data = None @@ -501,7 +501,7 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase): written_line = self.serial.write.call_args[0][0] print(type(written_line)) self.assertEndsWith(written_line.decode('ascii'), - b'\n'.decode('ascii')) + b'\n'.decode('ascii')) self.assertEqual(1, written_line.count(b'\n')) def _get_written_line(self, key='some_key'): diff --git a/tests/unittests/test_handler/test_handler_power_state.py 
b/tests/unittests/test_handler/test_handler_power_state.py index 5687b10d..f9660ff6 100644 --- a/tests/unittests/test_handler/test_handler_power_state.py +++ b/tests/unittests/test_handler/test_handler_power_state.py @@ -74,7 +74,7 @@ class TestLoadPowerState(t_help.TestCase): class TestCheckCondition(t_help.TestCase): def cmd_with_exit(self, rc): return([sys.executable, '-c', 'import sys; sys.exit(%s)' % rc]) - + def test_true_is_true(self): self.assertEqual(psc.check_condition(True), True) @@ -94,7 +94,6 @@ class TestCheckCondition(t_help.TestCase): self.assertEqual(mocklog.warn.call_count, 1) - def check_lps_ret(psc_return, mode=None): if len(psc_return) != 3: raise TypeError("length returned = %d" % len(psc_return)) diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py index 0bcdcb31..34d11f21 100644 --- a/tests/unittests/test_handler/test_handler_seed_random.py +++ b/tests/unittests/test_handler/test_handler_seed_random.py @@ -190,7 +190,8 @@ class TestRandomSeed(t_help.TestCase): c = self._get_cloud('ubuntu', {}) self.whichdata = {} self.assertRaises(ValueError, cc_seed_random.handle, - 'test', {'random_seed': {'command_required': True}}, c, LOG, []) + 'test', {'random_seed': {'command_required': True}}, + c, LOG, []) def test_seed_command_and_required(self): c = self._get_cloud('ubuntu', {}) diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py index eceb14d9..8aeff53c 100644 --- a/tests/unittests/test_handler/test_handler_snappy.py +++ b/tests/unittests/test_handler/test_handler_snappy.py @@ -125,8 +125,7 @@ class TestInstallPackages(t_help.TestCase): "pkg1.smoser.config": "pkg1.smoser.config-data", "pkg1.config": "pkg1.config-data", "pkg2.smoser_0.0_amd64.snap": "pkg2-snapdata", - "pkg2.smoser_0.0_amd64.config": "pkg2.config", - }) + "pkg2.smoser_0.0_amd64.config": "pkg2.config"}) ret = get_package_ops( packages=[], configs={}, installed=[], fspath=self.tmp) diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py index 3b317121..9aeb1cde 100644 --- a/tests/unittests/test_sshutil.py +++ b/tests/unittests/test_sshutil.py @@ -32,7 +32,8 @@ VALID_CONTENT = { ), } -TEST_OPTIONS = ("no-port-forwarding,no-agent-forwarding,no-X11-forwarding," +TEST_OPTIONS = ( + "no-port-forwarding,no-agent-forwarding,no-X11-forwarding," 'command="echo \'Please login as the user \"ubuntu\" rather than the' 'user \"root\".\';echo;sleep 10"') diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py index 0c19a2c2..b9863650 100644 --- a/tests/unittests/test_templating.py +++ b/tests/unittests/test_templating.py @@ -114,5 +114,6 @@ $a,$b''' codename) out_data = templater.basic_render(in_data, - {'mirror': mirror, 'codename': codename}) + {'mirror': mirror, + 'codename': codename}) self.assertEqual(ex_data, out_data) diff --git a/tools/hacking.py b/tools/hacking.py index 3175df38..1a0631c2 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -47,10 +47,10 @@ def import_normalize(line): # handle "from x import y as z" to "import x.y as z" split_line = line.split() if (line.startswith("from ") and "," not in line and - split_line[2] == "import" and split_line[3] != "*" and - split_line[1] != "__future__" and - (len(split_line) == 4 or - (len(split_line) == 6 and split_line[4] == "as"))): + split_line[2] == "import" and split_line[3] != "*" and + split_line[1] != "__future__" and + (len(split_line) == 4 or + 
(len(split_line) == 6 and split_line[4] == "as"))): return "import %s.%s" % (split_line[1], split_line[3]) else: return line @@ -74,7 +74,7 @@ def cloud_import_alphabetical(physical_line, line_number, lines): split_line[0] == "import" and split_previous[0] == "import"): if split_line[1] < split_previous[1]: return (0, "N306: imports not in alphabetical order (%s, %s)" - % (split_previous[1], split_line[1])) + % (split_previous[1], split_line[1])) def cloud_docstring_start_space(physical_line): @@ -87,8 +87,8 @@ def cloud_docstring_start_space(physical_line): pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start if (pos != -1 and len(physical_line) > pos + 1): if (physical_line[pos + 3] == ' '): - return (pos, "N401: one line docstring should not start with" - " a space") + return (pos, + "N401: one line docstring should not start with a space") def cloud_todo_format(physical_line): @@ -167,4 +167,4 @@ if __name__ == "__main__": finally: if len(_missingImport) > 0: print >> sys.stderr, ("%i imports missing in this test environment" - % len(_missingImport)) + % len(_missingImport)) diff --git a/tools/mock-meta.py b/tools/mock-meta.py index dfbc2a71..1c746f17 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -126,11 +126,11 @@ class WebException(Exception): def yamlify(data): formatted = yaml.dump(data, - line_break="\n", - indent=4, - explicit_start=True, - explicit_end=True, - default_flow_style=False) + line_break="\n", + indent=4, + explicit_start=True, + explicit_end=True, + default_flow_style=False) return formatted @@ -282,7 +282,7 @@ class MetaDataHandler(object): else: log.warn(("Did not implement action %s, " "returning empty response: %r"), - action, NOT_IMPL_RESPONSE) + action, NOT_IMPL_RESPONSE) return NOT_IMPL_RESPONSE @@ -404,14 +404,17 @@ def setup_logging(log_level, fmt='%(levelname)s: @%(name)s : %(message)s'): def extract_opts(): parser = OptionParser() parser.add_option("-p", "--port", dest="port", action="store", type=int, - default=80, metavar="PORT", - help="port from which to serve traffic (default: %default)") + default=80, metavar="PORT", + help=("port from which to serve traffic" + " (default: %default)")) parser.add_option("-a", "--addr", dest="address", action="store", type=str, - default='0.0.0.0', metavar="ADDRESS", - help="address from which to serve traffic (default: %default)") + default='0.0.0.0', metavar="ADDRESS", + help=("address from which to serve traffic" + " (default: %default)")) parser.add_option("-f", '--user-data-file', dest='user_data_file', - action='store', metavar='FILE', - help="user data filename to serve back to incoming requests") + action='store', metavar='FILE', + help=("user data filename to serve back to" + "incoming requests")) (options, args) = parser.parse_args() out = dict() out['extra'] = args diff --git a/tools/run-pep8 b/tools/run-pep8 index ccd6be5a..086400fc 100755 --- a/tools/run-pep8 +++ b/tools/run-pep8 @@ -1,39 +1,22 @@ #!/bin/bash -if [ $# -eq 0 ]; then - files=( bin/cloud-init $(find * -name "*.py" -type f) ) +pycheck_dirs=( "cloudinit/" "bin/" "tests/" "tools/" ) +# FIXME: cloud-init modifies sys module path, pep8 does not like +# bin_files=( "bin/cloud-init" ) +CR=" +" +[ "$1" = "-v" ] && { verbose="$1"; shift; } || verbose="" + +set -f +if [ $# -eq 0 ]; then unset IFS + IFS="$CR" + files=( "${bin_files[@]}" "${pycheck_dirs[@]}" ) + unset IFS else - files=( "$@" ); + files=( "$@" ) fi -if [ -f 'hacking.py' ] -then - base=`pwd` -else - base=`pwd`/tools/ -fi - -IGNORE="" - -# King Arthur: Be 
quiet! ... Be Quiet! I Order You to Be Quiet. -IGNORE="$IGNORE,E121" # Continuation line indentation is not a multiple of four -IGNORE="$IGNORE,E123" # Closing bracket does not match indentation of opening bracket's line -IGNORE="$IGNORE,E124" # Closing bracket missing visual indentation -IGNORE="$IGNORE,E125" # Continuation line does not distinguish itself from next logical line -IGNORE="$IGNORE,E126" # Continuation line over-indented for hanging indent -IGNORE="$IGNORE,E127" # Continuation line over-indented for visual indent -IGNORE="$IGNORE,E128" # Continuation line under-indented for visual indent -IGNORE="$IGNORE,E502" # The backslash is redundant between brackets -IGNORE="${IGNORE#,}" # remove the leading ',' added above - -cmd=( - ${base}/hacking.py - - --ignore="$IGNORE" - - "${files[@]}" -) - -echo -e "\nRunning 'cloudinit' pep8:" -echo "${cmd[@]}" -"${cmd[@]}" +myname=${0##*/} +cmd=( "${myname#run-}" $verbose "${files[@]}" ) +echo "Running: " "${cmd[@]}" 1>&2 +exec "${cmd[@]}" -- cgit v1.2.3 From bbf105baafbe788f7babbda188b513180424e256 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Thu, 3 Mar 2016 16:01:39 -0800 Subject: Resolved all the pep8 errors. Executed ./tools/run-pep8 cloudinit/sources/DataSourceOVF.py and no errors were reported. --- cloudinit/sources/DataSourceOVF.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index d92c128c..d07f6219 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -66,13 +66,14 @@ class DataSourceOVF(sources.DataSource): system_type = util.read_dmi_data("system-product-name") if system_type is None: - LOG.debug("No system-product-name found") + LOG.debug("No system-product-name found") elif 'vmware' in system_type.lower(): LOG.debug("VMware Virtualization Platform found") if not util.get_cfg_option_bool(self.sys_cfg, "disable_vmware_customization", True): - deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so") + deployPkgPluginPath = search_file("/usr/lib/vmware-tools", + "libdeployPkgPlugin.so") if deployPkgPluginPath: vmwareImcConfigFilePath = util.log_time(logfunc=LOG.debug, msg="waiting for configuration file", @@ -80,7 +81,8 @@ class DataSourceOVF(sources.DataSource): args=("/tmp", "cust.cfg")) if vmwareImcConfigFilePath: - LOG.debug("Found VMware DeployPkg Config File Path at %s" % vmwareImcConfigFilePath) + LOG.debug("Found VMware DeployPkg Config File at %s" % + vmwareImcConfigFilePath) else: LOG.debug("Did not find VMware DeployPkg Config File Path") else: @@ -151,7 +153,7 @@ class DataSourceOVF(sources.DataSource): def get_public_ssh_keys(self): if 'public-keys' not in self.metadata: - return [] + return [] pks = self.metadata['public-keys'] if isinstance(pks, (list)): return pks @@ -174,7 +176,7 @@ class DataSourceOVFNet(DataSourceOVF): def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5): waited = 0 - + while waited < maxwait: fileFullPath = search_file(dirpath, filename) if fileFullPath: @@ -183,6 +185,7 @@ def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5): waited += naplen return None + # This will return a dict with some content # meta-data, user-data, some config def read_vmware_imc(config): @@ -190,13 +193,14 @@ def read_vmware_imc(config): cfg = {} ud = "" if config.host_name: - if config.domain_name: - md['local-hostname'] = config.host_name + "." 
+ config.domain_name - else: - md['local-hostname'] = config.host_name + if config.domain_name: + md['local-hostname'] = config.host_name + "." + config.domain_name + else: + md['local-hostname'] = config.host_name return (md, ud, cfg) + # This will return a dict with some content # meta-data, user-data, some config def read_ovf_environment(contents): @@ -351,7 +355,7 @@ def get_properties(contents): def search_file(dirpath, filename): if not dirpath or not filename: - return None + return None for root, dirs, files in os.walk(dirpath): if filename in files: @@ -359,6 +363,7 @@ def search_file(dirpath, filename): return None + class XmlError(Exception): pass -- cgit v1.2.3 From 9ec6c876b72ccfa2ae590505fe6dbf7c0c561520 Mon Sep 17 00:00:00 2001 From: Alex Sirbu Date: Mon, 7 Mar 2016 09:33:40 +0000 Subject: Returning false if file does not exist, instead of throwing error --- cloudinit/sources/DataSourceBigstep.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py index c22ffdb6..2d66c609 100644 --- a/cloudinit/sources/DataSourceBigstep.py +++ b/cloudinit/sources/DataSourceBigstep.py @@ -5,6 +5,7 @@ # import json +import errno from cloudinit import log as logging from cloudinit import sources @@ -22,7 +23,13 @@ class DataSourceBigstep(sources.DataSource): self.userdata_raw = "" def get_data(self, apply_filter=False): - url = get_url_from_file() + try: + url = get_url_from_file() + except IOError as e: + if e.errno == errno.ENOENT: + return False + else: + raise response = url_helper.readurl(url) decoded = json.loads(response.contents) self.metadata = decoded["metadata"] -- cgit v1.2.3 From d23868d6d3e35a91c348b94ce8416f56514aaf15 Mon Sep 17 00:00:00 2001 From: Alex Sirbu Date: Mon, 7 Mar 2016 12:30:08 +0000 Subject: Implemented review concerning position of try and more information about the caught exception. 
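The pattern this review converges on is a common one for datasource probing: a missing seed file means "this machine is probably not on this platform", while any other I/O failure is a real error that should surface. A minimal standalone sketch of that idiom (the helper name here is illustrative, not part of the patch):

    import errno

    def load_seed_file(path):
        # Hypothetical helper: read a seed file that may legitimately
        # be absent. ENOENT means "no data here" and maps to None;
        # anything else (permissions, I/O errors) is re-raised so the
        # failure gets investigated rather than silently swallowed.
        try:
            with open(path, 'r') as fp:
                return fp.read()
        except IOError as e:
            if e.errno == errno.ENOENT:
                return None
            raise

The caller can then turn None into a clean "return False" from get_data(), which is exactly the split the patch below makes.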
--- cloudinit/sources/DataSourceBigstep.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py index 2d66c609..b5ee4129 100644 --- a/cloudinit/sources/DataSourceBigstep.py +++ b/cloudinit/sources/DataSourceBigstep.py @@ -23,13 +23,9 @@ class DataSourceBigstep(sources.DataSource): self.userdata_raw = "" def get_data(self, apply_filter=False): - try: - url = get_url_from_file() - except IOError as e: - if e.errno == errno.ENOENT: - return False - else: - raise + url = get_url_from_file() + if url is None: + return False response = url_helper.readurl(url) decoded = json.loads(response.contents) self.metadata = decoded["metadata"] @@ -39,7 +35,15 @@ def get_url_from_file(): - content = util.load_file("/var/lib/cloud/data/seed/bigstep/url") + try: + content = util.load_file("/var/lib/cloud/data/seed/bigstep/url") + except IOError as e: + # If the file doesn't exist, then the server probably isn't a Bigstep + # instance; otherwise, another problem exists which needs investigation + if e.errno == errno.ENOENT: + return None + else: + raise return content # Used to match classes to dependencies -- cgit v1.2.3 From ef7368ef61c47fbb0bc03e6e7a5bc4571d492baf Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Tue, 8 Mar 2016 12:41:08 -0800 Subject: - Ignored return code 1 for 'pkill' command in config_nic.py - Added a few utility functions to report events to the underlying VMware Virtualization platform - Re-factored the code a little bit. - Executed ./tools/run-pep8 and no pep8 errors were reported. --- cloudinit/sources/DataSourceOVF.py | 40 +++++++++-- cloudinit/sources/helpers/vmware/imc/config_nic.py | 14 ++-- .../sources/helpers/vmware/imc/guestcust_error.py | 24 +++++++ .../sources/helpers/vmware/imc/guestcust_event.py | 27 ++++++++ .../sources/helpers/vmware/imc/guestcust_state.py | 25 +++++++ .../sources/helpers/vmware/imc/guestcust_util.py | 79 ++++++++++++++++++++++ 6 files changed, 198 insertions(+), 11 deletions(-) create mode 100644 cloudinit/sources/helpers/vmware/imc/guestcust_error.py create mode 100644 cloudinit/sources/helpers/vmware/imc/guestcust_event.py create mode 100644 cloudinit/sources/helpers/vmware/imc/guestcust_state.py create mode 100644 cloudinit/sources/helpers/vmware/imc/guestcust_util.py (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index d07f6219..0fbdf0b8 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -34,6 +34,14 @@ from cloudinit import util from cloudinit.sources.helpers.vmware.imc.config import Config from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator +from cloudinit.sources.helpers.vmware.imc.guestcust_event import \ + GuestCustEventEnum +from cloudinit.sources.helpers.vmware.imc.guestcust_state import \ + GuestCustStateEnum +from cloudinit.sources.helpers.vmware.imc.guestcust_error import \ + GuestCustErrorEnum +from cloudinit.sources.helpers.vmware.imc.guestcust_util import \ + set_customization_status LOG = logging.getLogger(__name__) @@ -74,6 +82,9 @@ class DataSourceOVF(sources.DataSource): True): deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so") + if not deployPkgPluginPath: + deployPkgPluginPath =
search_file("/usr/lib/open-vm-tools", + "libdeployPkgPlugin.so") if deployPkgPluginPath: vmwareImcConfigFilePath = util.log_time(logfunc=LOG.debug, msg="waiting for configuration file", @@ -93,14 +104,33 @@ class DataSourceOVF(sources.DataSource): cf = ConfigFile(vmwareImcConfigFilePath) conf = Config(cf) (md, ud, cfg) = read_vmware_imc(conf) - nicConfigurator = NicConfigurator(conf.nics) - nicConfigurator.configure() - vmwarePlatformFound = True - except Exception as inst: - LOG.debug("Error while parsing the Customization Config File") + except Exception as e: + LOG.debug("Error parsing the customization Config File") + LOG.exception(e) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) + return False finally: dirPath = os.path.dirname(vmwareImcConfigFilePath) shutil.rmtree(dirPath) + + try: + LOG.debug("Applying the Network customization") + nicConfigurator = NicConfigurator(conf.nics) + nicConfigurator.configure() + except Exception as e: + LOG.debug("Error applying the Network Configuration") + LOG.exception(e) + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED) + return False + + vmwarePlatformFound = True + set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_DONE, + GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) elif seedfile: # Found a seed dir seed = os.path.join(self.paths.seed_dir, seedfile) diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py index 172a1649..42fbcc7e 100644 --- a/cloudinit/sources/helpers/vmware/imc/config_nic.py +++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py @@ -47,12 +47,12 @@ class NicConfigurator: """ primary_nics = [nic for nic in self.nics if nic.primary] if not primary_nics: - return None + return None elif len(primary_nics) > 1: - raise Exception('There can only be one primary nic', + raise Exception('There can only be one primary nic', [nic.mac for nic in primary_nics]) else: - return primary_nics[0] + return primary_nics[0] def find_devices(self): """ @@ -186,8 +186,9 @@ class NicConfigurator: lines = [] for addr in addrs: - lines.append(' up route -A inet6 add default gw %s metric 10000' % - addr.gateway) + lines.append( + ' up route -A inet6 add default gw %s metric 10000' % + addr.gateway) return lines @@ -206,7 +207,8 @@ class NicConfigurator: def clear_dhcp(self): logger.info('Clearing DHCP leases') - util.subp(["pkill", "dhclient"]) + # Ignore the return code 1. + util.subp(["pkill", "dhclient"], rcs=[0, 1]) util.subp(["rm", "-f", "/var/lib/dhcp/*"]) def if_down_up(self): diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py new file mode 100644 index 00000000..1b04161f --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py @@ -0,0 +1,24 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2016 Canonical Ltd. +# Copyright (C) 2016 VMware Inc. +# +# Author: Sankar Tanguturi +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +class GuestCustErrorEnum: + """Specifies different errors of Guest Customization engine""" + + GUESTCUST_ERROR_SUCCESS = 0 diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py new file mode 100644 index 00000000..fc22568f --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py @@ -0,0 +1,27 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2016 Canonical Ltd. +# Copyright (C) 2016 VMware Inc. +# +# Author: Sankar Tanguturi +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +class GuestCustEventEnum: + """Specifies different types of Guest Customization Events""" + + GUESTCUST_EVENT_CUSTOMIZE_FAILED = 100 + GUESTCUST_EVENT_NETWORK_SETUP_FAILED = 101 + GUESTCUST_EVENT_ENABLE_NICS = 103 + GUESTCUST_EVENT_QUERY_NICS = 104 diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py new file mode 100644 index 00000000..f255be5f --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py @@ -0,0 +1,25 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2016 Canonical Ltd. +# Copyright (C) 2016 VMware Inc. +# +# Author: Sankar Tanguturi +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + + +class GuestCustStateEnum: + """Specifies different states of Guest Customization engine""" + + GUESTCUST_STATE_RUNNING = 4 + GUESTCUST_STATE_DONE = 5 diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py new file mode 100644 index 00000000..2466a47e --- /dev/null +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -0,0 +1,79 @@ +# vi: ts=4 expandtab +# +# Copyright (C) 2016 Canonical Ltd. +# Copyright (C) 2016 VMware Inc. +# +# Author: Sankar Tanguturi +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 3, as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +import logging +import os + +from cloudinit import util + + +logger = logging.getLogger(__name__) + + +CLOUDINIT_LOG_FILE = "/var/log/cloud-init.log" + + +# This will send a RPC command to the underlying +# VMware Virtualization Platform. +def send_rpc(rpc): + if not rpc: + return None + + rc = 1 + output = "Error sending the RPC command" + + try: + logger.debug("Sending RPC command: %s", rpc) + (rc, output) = util.subp(["vmware-rpctool", rpc], rcs=[0]) + except Exception as e: + logger.debug("Failed to send RPC command") + logger.exception(e) + + return (rc, output) + + +# This will send the customization status to the +# underlying VMware Virtualization Platform. +def set_customization_status(custstate, custerror, errormessage=None): + message = "" + + if errormessage: + message = CLOUDINIT_LOG_FILE + "@" + errormessage + else: + message = CLOUDINIT_LOG_FILE + + rpc = "deployPkg.update.state %d %d %s" % (custstate, custerror, message) + (rc, output) = send_rpc(rpc) + + +# This will read the file nics.txt in the specified directory +# and return the content +def get_nics_to_enable(dirpath): + if not dirpath: + return None + + NICS_SIZE = 1024 + nicsfilepath = os.path.join(dirpath, "nics.txt") + if not os.path.exists(nicsfilepath): + return None + + with open(nicsfilepath, 'r') as fp: + nics = fp.read(NICS_SIZE) + + return nics -- cgit v1.2.3 From a6e0922a4d34ede6df000dd8fc4bb3531218d69f Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Wed, 9 Mar 2016 16:02:34 -0800 Subject: - Fixed a few issues with return values from util.subp() - Added a new utility method to send an RPC for enabling NICS - Modified DataSourceOVF.py to enable nics. - Executed ./tools/run-pep8 and no issues were reported. --- cloudinit/sources/DataSourceOVF.py | 11 +++- .../sources/helpers/vmware/imc/guestcust_util.py | 60 ++++++++++++++++++++-- 2 files changed, 64 insertions(+), 7 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 0fbdf0b8..bc13b71a 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -40,8 +40,11 @@ from cloudinit.sources.helpers.vmware.imc.guestcust_state import \ GuestCustStateEnum from cloudinit.sources.helpers.vmware.imc.guestcust_error import \ GuestCustErrorEnum -from cloudinit.sources.helpers.vmware.imc.guestcust_util import \ - set_customization_status +from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( + set_customization_status, + get_nics_to_enable, + enable_nics +) LOG = logging.getLogger(__name__) @@ -100,10 +103,13 @@ class DataSourceOVF(sources.DataSource): LOG.debug("Customization for VMware platform is disabled.") if vmwareImcConfigFilePath: + nics = "" try: cf = ConfigFile(vmwareImcConfigFilePath) conf = Config(cf) (md, ud, cfg) = read_vmware_imc(conf) + dirpath = os.path.dirname(vmwareImcConfigFilePath) + nics = get_nics_to_enable(dirpath) except Exception as e: LOG.debug("Error parsing the customization Config File") LOG.exception(e) @@ -128,6 +134,7 @@ class DataSourceOVF(sources.DataSource): return False vmwarePlatformFound = True + enable_nics(nics) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_DONE, GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index 2466a47e..b8c58f1e 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@
-19,14 +19,20 @@ import logging import os +import time from cloudinit import util +from .guestcust_state import GuestCustStateEnum +from .guestcust_error import GuestCustErrorEnum +from .guestcust_event import GuestCustEventEnum logger = logging.getLogger(__name__) CLOUDINIT_LOG_FILE = "/var/log/cloud-init.log" +QUERY_NICS_SUPPORTED = "queryNicsSupported" +NICS_STATUS_CONNECTED = "connected" # This will send a RPC command to the underlying @@ -35,17 +41,20 @@ def send_rpc(rpc): if not rpc: return None - rc = 1 - output = "Error sending the RPC command" + out = "" + err = "Error sending the RPC command" try: logger.debug("Sending RPC command: %s", rpc) - (rc, output) = util.subp(["vmware-rpctool", rpc], rcs=[0]) + (out, err) = util.subp(["vmware-rpctool", rpc], rcs=[0]) + # Remove the trailing newline in the output. + if out: + out = out.rstrip() except Exception as e: logger.debug("Failed to send RPC command") logger.exception(e) - return (rc, output) + return (out, err) # This will send the customization status to the @@ -59,7 +68,8 @@ def set_customization_status(custstate, custerror, errormessage=None): message = CLOUDINIT_LOG_FILE rpc = "deployPkg.update.state %d %d %s" % (custstate, custerror, message) - (rc, output) = send_rpc(rpc) + (out, err) = send_rpc(rpc) + return (out, err) # This will read the file nics.txt in the specified directory @@ -77,3 +87,43 @@ def get_nics_to_enable(dirpath): nics = fp.read(NICS_SIZE) return nics + + +# This will send a RPC command to the underlying VMware Virtualization platform + +# and enable nics. +def enable_nics(nics): + if not nics: + logger.warning("No Nics found") + return + + enableNicsWaitRetries = 5 + enableNicsWaitCount = 5 + enableNicsWaitSeconds = 1 + + for attempt in range(0, enableNicsWaitRetries): + logger.debug("Trying to connect interfaces, attempt %d", attempt) + (out, err) = set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS, + nics) + if not out: + time.sleep(enableNicsWaitCount * enableNicsWaitSeconds) + continue + + if out != QUERY_NICS_SUPPORTED: + logger.warning("NICS connection status query is not supported") + return + + for count in range(0, enableNicsWaitCount): + (out, err) = set_customization_status( + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS, + nics) + if out and out == NICS_STATUS_CONNECTED: + logger.info("NICS are connected on %d second", count) + return + + time.sleep(enableNicsWaitSeconds) + + logger.warning("Can't connect network interfaces after %d attempts", + enableNicsWaitRetries) -- cgit v1.2.3 From 781ded8127deefb49a8806e49bdb7bb6e4d4b245 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 10 Mar 2016 21:43:46 -0500 Subject: commit planned implementation of datasourcenocloud this adds the consumption of 'network-config' to the datasourcenocloud. There is an implementation of the network rendering that is untested in distros/debian.
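In outline, the new flow is: a NoCloud seed may now carry an optional 'network-config' file (or the older 'network-interfaces' key inside meta-data), and whichever is present is handed to the distro layer to render. A rough sketch of reading such a seed directory with the helper the patch itself uses, assuming the stock NoCloud seed location (the path is illustrative):

    from cloudinit import util

    seed_dir = '/var/lib/cloud/seed/nocloud'  # illustrative; the patch uses self.seed_dir
    # required/optional mirror the pp2d_kwargs set up in the patch below
    seeded = util.pathprefix2dict(seed_dir,
                                  required=['user-data', 'meta-data'],
                                  optional=['vendor-data', 'network-config'])
    net_cfg = None
    if seeded.get('network-config'):
        net_cfg = util.load_yaml(seeded['network-config'])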
--- cloudinit/distros/__init__.py | 11 ++++++++ cloudinit/distros/debian.py | 10 +++++++ cloudinit/sources/DataSourceNoCloud.py | 49 ++++++++++++++++++++++------------ 3 files changed, 53 insertions(+), 17 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index a73acae5..461253a7 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -75,6 +75,9 @@ class Distro(object): # to write this blob out in a distro format raise NotImplementedError() + def _write_network_config(self, settings): + raise NotImplementedError() + def _find_tz_file(self, tz): tz_file = os.path.join(self.tz_zone_dir, str(tz)) if not os.path.isfile(tz_file): @@ -132,6 +135,14 @@ class Distro(object): return self._bring_up_interfaces(dev_names) return False + def apply_network_config(self, netconfig, bring_up=True): + # Write it out + dev_names = self._write_network_config(netconfig) + # Now try to bring them up + if bring_up: + return self._bring_up_interfaces(dev_names) + return False + @abc.abstractmethod def apply_locale(self, locale, out_fn=None): raise NotImplementedError() diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index db5890b1..89d8d28e 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -26,6 +26,8 @@ from cloudinit import distros from cloudinit import helpers from cloudinit import log as logging from cloudinit import util +from cloudinit import net +from cloudinit.net import network_state from cloudinit.distros.parsers.hostname import HostnameConf @@ -76,6 +78,14 @@ class Distro(distros.Distro): util.write_file(self.network_conf_fn, settings) return ['all'] + def _write_network_config(self, netconfig): + # TODO: THIS IS NOT TESTED + state = network_state.NetworkState() + state.load(netconfig) + state.parse_config() + net.render_network_state("/", state) + return ['all'] + def _bring_up_interfaces(self, device_names): use_all = False for d in device_names: diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 4cad6877..e00210e7 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -50,21 +50,22 @@ class DataSourceNoCloud(sources.DataSource): } found = [] - mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""} + mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "", + 'network-config': {}} try: # Parse the kernel command line, getting data passed in md = {} if parse_cmdline_data(self.cmdline_id, md): found.append("cmdline") - mydata['meta-data'].update(md) + mydata = _merge_new_seed({'meta-data': md}) except: util.logexc(LOG, "Unable to parse command line data") return False # Check to see if the seed dir has data. 
pp2d_kwargs = {'required': ['user-data', 'meta-data'], - 'optional': ['vendor-data']} + 'optional': ['vendor-data', 'network-config']} try: seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs) @@ -141,8 +142,7 @@ class DataSourceNoCloud(sources.DataSource): if len(found) == 0: return False - seeded_interfaces = None - + seeded_network = None # The special argument "seedfrom" indicates we should # attempt to seed the userdata / metadata from its value # its primarily value is in allowing the user to type less @@ -158,8 +158,9 @@ class DataSourceNoCloud(sources.DataSource): LOG.debug("Seed from %s not supported by %s", seedfrom, self) return False - if 'network-interfaces' in mydata['meta-data']: - seeded_interfaces = self.dsmode + if (mydata['meta-data'].get('network-interfaces') or + mydata.get('network-config')): + seeded_network = self.dsmode # This could throw errors, but the user told us to do it # so if errors are raised, let them raise @@ -176,15 +177,25 @@ class DataSourceNoCloud(sources.DataSource): mydata['meta-data'] = util.mergemanydict([mydata['meta-data'], defaults]) - # Update the network-interfaces if metadata had 'network-interfaces' - # entry and this is the local datasource, or 'seedfrom' was used - # and the source of the seed was self.dsmode - # ('local' for NoCloud, 'net' for NoCloudNet') - if ('network-interfaces' in mydata['meta-data'] and - (self.dsmode in ("local", seeded_interfaces))): - LOG.debug("Updating network interfaces from %s", self) - self.distro.apply_network( - mydata['meta-data']['network-interfaces']) + netdata = {'format': None, 'data': None} + if mydata['meta-data'].get('network-interfaces'): + netdata['format'] = 'interfaces' + netdata['data'] = mydata['meta-data']['network-interfaces'] + elif mydata.get('network-config'): + netdata['format'] = 'network-config' + netdata['data'] = mydata['network-config'] + + # if this is the local datasource or 'seedfrom' was used + # and the source of the seed was self.dsmode. + # Then see if there is network config to apply. 
+ if self.dsmode in ("local", seeded_network): + if mydata['meta-data'].get('network-interfaces'): + LOG.debug("Updating network interfaces from %s", self) + self.distro.apply_network( + mydata['meta-data']['network-interfaces']) + elif mydata.get('network-config'): + LOG.debug("Updating network config from %s", self) + self.distro.apply_network_config(mydata['network-config']) if mydata['meta-data']['dsmode'] == self.dsmode: self.seed = ",".join(found) @@ -246,7 +257,11 @@ def _merge_new_seed(cur, seeded): ret = cur.copy() ret['meta-data'] = util.mergemanydict([cur['meta-data'], util.load_yaml(seeded['meta-data'])]) - ret['user-data'] = seeded['user-data'] + if seeded.get('network-config'): + ret['network-config'] = util.load_yaml(seeded['network-config']) + + if 'user-data' in seeded: + ret['user-data'] = seeded['user-data'] if 'vendor-data' in seeded: ret['vendor-data'] = seeded['vendor-data'] return ret -- cgit v1.2.3 From 24a5e31f5ad96cde75315ed488b6d5a011533936 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 11 Mar 2016 16:07:49 -0500 Subject: minor changes use the helpers in cloudinit/net functional --- cloudinit/distros/debian.py | 14 +++----------- cloudinit/sources/DataSourceNoCloud.py | 3 ++- 2 files changed, 5 insertions(+), 12 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py index 24545fd4..36a844f1 100644 --- a/cloudinit/distros/debian.py +++ b/cloudinit/distros/debian.py @@ -44,14 +44,6 @@ APT_GET_WRAPPER = { } -def render_network_config(config, target="/"): - version = config['version'] - config = config['config'] - ns = network_state.NetworkState(version=version, config=config) - ns.parse_config() - net.render_network_state(target, ns.network_state) - - class Distro(distros.Distro): hostname_conf_fn = "/etc/hostname" locale_conf_fn = "/etc/default/locale" @@ -87,9 +79,9 @@ class Distro(distros.Distro): return ['all'] def _write_network_config(self, netconfig): - # TODO: THIS IS NOT TESTED - render_network_config(netconfig) - return ['all'] + ns = net.parse_net_config_data(netconfig) + net.render_network_state(network_state=ns, target="/") + return [] def _bring_up_interfaces(self, device_names): use_all = False diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index e00210e7..a3532463 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -195,7 +195,8 @@ class DataSourceNoCloud(sources.DataSource): mydata['meta-data']['network-interfaces']) elif mydata.get('network-config'): LOG.debug("Updating network config from %s", self) - self.distro.apply_network_config(mydata['network-config']) + self.distro.apply_network_config(mydata['network-config'], + bring_up=False) if mydata['meta-data']['dsmode'] == self.dsmode: self.seed = ",".join(found) -- cgit v1.2.3 From 03998cd336b3906dc1eb675fff1ddeb1272668d3 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Fri, 11 Mar 2016 13:29:28 -0800 Subject: - Fixed a few pep8 and flake8 issues. - Changed the really long 'from ... import ...' statements.
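The import cleanup is mechanical but worth one illustration, since it is the shape pep8 pushes toward inside a package. Taken directly from the diff that follows (no assumptions beyond what the patch shows):

    # Before: fully qualified imports overflow 79 columns and need
    # backslash continuations.
    from cloudinit.sources.helpers.vmware.imc.guestcust_event import \
        GuestCustEventEnum

    # After: package-relative imports from inside cloudinit/sources
    # fit on one line with no continuation character.
    from .helpers.vmware.imc.guestcust_event import GuestCustEventEnum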
--- cloudinit/sources/DataSourceOVF.py | 21 +++++++++------------ .../sources/helpers/vmware/imc/guestcust_util.py | 13 ++++++------- 2 files changed, 15 insertions(+), 19 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index fec13b93..65cefc48 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -31,16 +31,13 @@ import time from cloudinit import log as logging from cloudinit import sources from cloudinit import util -from cloudinit.sources.helpers.vmware.imc.config import Config -from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile -from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator -from cloudinit.sources.helpers.vmware.imc.guestcust_event import \ - GuestCustEventEnum -from cloudinit.sources.helpers.vmware.imc.guestcust_state import \ - GuestCustStateEnum -from cloudinit.sources.helpers.vmware.imc.guestcust_error import \ - GuestCustErrorEnum -from cloudinit.sources.helpers.vmware.imc.guestcust_util import ( +from .helpers.vmware.imc.config import Config +from .helpers.vmware.imc.config_file import ConfigFile +from .helpers.vmware.imc.config_nic import NicConfigurator +from .helpers.vmware.imc.guestcust_event import GuestCustEventEnum +from .helpers.vmware.imc.guestcust_state import GuestCustStateEnum +from .helpers.vmware.imc.guestcust_error import GuestCustErrorEnum +from .helpers.vmware.imc.guestcust_util import ( set_customization_status, get_nics_to_enable, enable_nics @@ -135,8 +132,8 @@ class DataSourceOVF(sources.DataSource): vmwarePlatformFound = True enable_nics(nics) set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_DONE, - GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) + GuestCustStateEnum.GUESTCUST_STATE_DONE, + GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) elif seedfile: # Found a seed dir seed = os.path.join(self.paths.seed_dir, seedfile) diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py index b8c58f1e..d39f0a65 100644 --- a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py +++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py @@ -24,7 +24,6 @@ import time from cloudinit import util from .guestcust_state import GuestCustStateEnum -from .guestcust_error import GuestCustErrorEnum from .guestcust_event import GuestCustEventEnum logger = logging.getLogger(__name__) @@ -103,9 +102,9 @@ def enable_nics(nics): for attempt in range(0, enableNicsWaitRetries): logger.debug("Trying to connect interfaces, attempt %d", attempt) (out, err) = set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS, - nics) + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS, + nics) if not out: time.sleep(enableNicsWaitCount * enableNicsWaitSeconds) continue @@ -116,9 +115,9 @@ def enable_nics(nics): for count in range(0, enableNicsWaitCount): (out, err) = set_customization_status( - GuestCustStateEnum.GUESTCUST_STATE_RUNNING, - GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS, - nics) + GuestCustStateEnum.GUESTCUST_STATE_RUNNING, + GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS, + nics) if out and out == NICS_STATUS_CONNECTED: logger.info("NICS are connected on %d second", count) return -- cgit v1.2.3 From 13a32d7599a939370ee0bc0e7257da2c59b4bd61 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Tue, 15 Mar 2016 17:22:08 -0700 Subject: - 
Added the code to customize timezone. --- cloudinit/sources/DataSourceOVF.py | 3 +++ 1 file changed, 3 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 65cefc48..5734d233 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -231,6 +231,9 @@ def read_vmware_imc(config): else: md['local-hostname'] = config.host_name + if config.timezone: + cfg['timezone'] = config.timezone + return (md, ud, cfg) -- cgit v1.2.3 From 1dd9102afda920d486a144b3153d6c9951f45cf9 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 16 Mar 2016 21:06:28 -0400 Subject: fix regression when command line (ds=nocloud) is present parsing the command line parameters returned a dictionary but _merge_new_seed was expecting a string to be yaml loaded. Change is to make _merge_new_seed take either string or dict. --- cloudinit/sources/DataSourceNoCloud.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index a3532463..64853385 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -58,7 +58,7 @@ class DataSourceNoCloud(sources.DataSource): md = {} if parse_cmdline_data(self.cmdline_id, md): found.append("cmdline") - mydata = _merge_new_seed({'meta-data': md}) + mydata = _merge_new_seed(mydata, {'meta-data': md}) except: util.logexc(LOG, "Unable to parse command line data") return False @@ -256,8 +256,12 @@ def parse_cmdline_data(ds_id, fill, cmdline=None): def _merge_new_seed(cur, seeded): ret = cur.copy() - ret['meta-data'] = util.mergemanydict([cur['meta-data'], - util.load_yaml(seeded['meta-data'])]) + + newmd = seeded.get('meta-data', {}) + if not isinstance(seeded['meta-data'], dict): + newmd = util.load_yaml(seeded['meta-data']) + ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd]) + if seeded.get('network-config'): ret['network-config'] = util.load_yaml(seeded['network-config']) -- cgit v1.2.3 From 0f187dd7035ac724912ea5c877f6bff1bea6fe57 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Sun, 20 Mar 2016 19:49:53 -0700 Subject: Misc fixes for VMware Support. - Modified the code to look for customization specification file in /var/run/vmware-imc/ directory instead of /tmp - Fixed the 'seed file' issue. There was a regression in DataSourceOVF.py file. Fixed it. 
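Two notes on the fix below. First, /var/run/vmware-imc is a root-owned location populated by the VMware deployment tools, which is presumably why it replaces world-writable /tmp as the place to look for cust.cfg. Second, the lookup is a bounded poll, so boot cannot hang forever on a file that never appears; a simplified standalone version of the wait_for_imc_cfg_file() helper already in this file (using a plain existence check instead of search_file) would look roughly like:

    import os
    import time

    def wait_for_file(dirpath, filename, maxwait=180, naplen=5):
        # Poll for a file the hypervisor tools drop asynchronously;
        # give up after maxwait seconds rather than blocking boot.
        waited = 0
        while waited < maxwait:
            path = os.path.join(dirpath, filename)
            if os.path.exists(path):
                return path
            time.sleep(naplen)
            waited += naplen
        return None

    # e.g. wait_for_file("/var/run/vmware-imc", "cust.cfg")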
--- cloudinit/sources/DataSourceOVF.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 5734d233..fc12cbb4 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -75,7 +75,14 @@ class DataSourceOVF(sources.DataSource): system_type = util.read_dmi_data("system-product-name") if system_type is None: LOG.debug("No system-product-name found") - elif 'vmware' in system_type.lower(): + + if seedfile: + # Found a seed dir + seed = os.path.join(self.paths.seed_dir, seedfile) + (md, ud, cfg) = read_ovf_environment(contents) + self.environment = contents + found.append(seed) + elif system_type and 'vmware' in system_type.lower(): LOG.debug("VMware Virtualization Platform found") if not util.get_cfg_option_bool( self.sys_cfg, "disable_vmware_customization", True): @@ -88,7 +95,8 @@ class DataSourceOVF(sources.DataSource): vmwareImcConfigFilePath = util.log_time( logfunc=LOG.debug, msg="waiting for configuration file", - func=wait_for_imc_cfg_file, args=("/tmp", "cust.cfg")) + func=wait_for_imc_cfg_file, + args=("/var/run/vmware-imc", "cust.cfg")) if vmwareImcConfigFilePath: LOG.debug("Found VMware DeployPkg Config File at %s" % @@ -134,12 +142,6 @@ class DataSourceOVF(sources.DataSource): set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_DONE, GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) - elif seedfile: - # Found a seed dir - seed = os.path.join(self.paths.seed_dir, seedfile) - (md, ud, cfg) = read_ovf_environment(contents) - self.environment = contents - found.append(seed) else: np = {'iso': transport_iso9660, 'vmware-guestd': transport_vmware_guestd, } -- cgit v1.2.3 From 0964b42e5117cce640a8ba9102a76fa54a698898 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 21 Mar 2016 21:47:24 -0400 Subject: quickly check to see if the previous instance id is still valid This adds a check in cloud-init to see if the existing (cached) datasource is still valid. It relies on support from the Datasource to implement 'check_instance_id'. That method should quickly determine (if possible) if the instance id found in the datasource is still valid. This means that we can still notice new instance ids without depending on a network datasource on every boot. 
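(The core of that quick check is small enough to show up front: a null-safe, case-insensitive comparison of the cached instance-id against the machine's DMI data. This sketch mirrors the instance_id_matches_system_uuid helper added below, except that the DMI value is passed in directly so it runs standalone; the real helper reads it via util.read_dmi_data.)

    def instance_id_matches_system_uuid(instance_id, dmi_value):
        # either side missing means we cannot vouch for the cached datasource
        if not instance_id or not dmi_value:
            return False
        return instance_id.lower() == dmi_value.lower()

    instance_id_matches_system_uuid('ABC-123', 'abc-123')  # True: cache valid
    instance_id_matches_system_uuid('ABC-123', None)       # False: rediscover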
I've also implemented check_instance_id for the superclass and for 3 classes: DataSourceAzure (check dmi data) DataSourceOpenstack (check dmi data) DataSourceNocloud (check the seeded data or kernel command line) LP: #1553815 --- ChangeLog | 2 ++ bin/cloud-init | 19 ++++++++--------- cloudinit/sources/DataSourceAzure.py | 4 ++++ cloudinit/sources/DataSourceNoCloud.py | 35 ++++++++++++++++++++++++++++++++ cloudinit/sources/DataSourceOpenStack.py | 4 ++++ cloudinit/sources/__init__.py | 16 +++++++++++++++ cloudinit/stages.py | 24 ++++++++++++++-------- 7 files changed, 85 insertions(+), 19 deletions(-) (limited to 'cloudinit/sources') diff --git a/ChangeLog b/ChangeLog index 0ec4f49e..b08665b0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -92,6 +92,8 @@ - doc: mention label for nocloud datasource must be 'cidata' [Peter Hurley] - ssh_pwauth: fix module to support 'unchanged' and match behavior described in documentation [Chris Cosby] + - quickly check to see if the previous instance id is still valid to + avoid dependency on network metadata service on every boot (LP: #1553815) 0.7.6: - open 0.7.6 diff --git a/bin/cloud-init b/bin/cloud-init index 7f665e7e..11cc0237 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -212,6 +212,7 @@ def main_init(name, args): # Stage 4 path_helper = init.paths if not args.local: + existing = "trust" sys.stderr.write("%s\n" % (netinfo.debug_info())) LOG.debug(("Checking to see if files that we need already" " exist from a previous run that would allow us" @@ -236,21 +237,17 @@ def main_init(name, args): LOG.debug("Execution continuing, no previous run detected that" " would allow us to stop early.") else: - # The cache is not instance specific, so it has to be purged - # but we want 'start' to benefit from a cache if - # a previous start-local populated one... - manual_clean = util.get_cfg_option_bool(init.cfg, - 'manual_cache_clean', False) - if manual_clean: - LOG.debug("Not purging instance link, manual cleaning enabled") - init.purge_cache(False) - else: - init.purge_cache() + existing = "check" + if util.get_cfg_option_bool(init.cfg, 'manual_cache_clean', False): + existing = "trust" + + init.purge_cache() # Delete the non-net file as well util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net")) + # Stage 5 try: - init.fetch() + init.fetch(existing=existing) except sources.DataSourceNotFoundException: # In the case of 'cloud-init init' without '--local' it is a bit # more likely that the user would consider it failure if nothing was diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 2af0ad9b..832b3063 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -254,6 +254,10 @@ class DataSourceAzureNet(sources.DataSource): def get_config_obj(self): return self.cfg + def check_instance_id(self): + # quickly (local check only) if self.instance_id is still valid + return sources.instance_id_matches_system_uuid(self.get_instance_id()) + def count_files(mp): return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*')) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 4cad6877..d07e6f84 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -197,6 +197,41 @@ class DataSourceNoCloud(sources.DataSource): mydata['meta-data']['dsmode']) return False + def check_instance_id(self): + # quickly (local check only) if self.instance_id is still valid + # we check kernel command line or files. 
+ current = self.get_instance_id() + if not current: + return None + + quick_id = _quick_read_instance_id(cmdline_id=self.cmdline_id, + dirs=[self.seed_dir]) + if not quick_id: + return None + return quick_id == current + + +def _quick_read_instance_id(cmdline_id, dirs=None): + if dirs is None: + dirs = [] + + iid_key = 'instance-id' + if cmdline_id is None: + fill = {} + if parse_cmdline_data(cmdline_id, fill) and iid_key in fill: + return fill[iid_key] + + for d in dirs: + try: + data = util.pathprefix2dict(d, required=['meta-data']) + md = util.load_yaml(data['meta-data']) + if iid_key in md: + return md[iid_key] + except ValueError: + pass + + return None + # Returns true or false indicating if cmdline indicated # that this module should be used diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 469c2e2a..79bb9d63 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -150,6 +150,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): return True + def check_instance_id(self): + # quickly (local check only) if self.instance_id is still valid + return sources.instance_id_matches_system_uuid(self.get_instance_id()) + def read_metadata_service(base_url, ssl_details=None): reader = openstack.MetadataReader(base_url, ssl_details=ssl_details) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index d3cfa560..28540a7b 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -217,6 +217,10 @@ class DataSource(object): def get_package_mirror_info(self): return self.distro.get_package_mirror_info(data_source=self) + def check_instance_id(self): + # quickly (local check only) if self.instance_id is still + return False + def normalize_pubkey_data(pubkey_data): keys = [] @@ -299,6 +303,18 @@ def list_sources(cfg_list, depends, pkg_list): return src_list +def instance_id_matches_system_uuid(instance_id, field='system-uuid'): + # quickly (local check only) if self.instance_id is still valid + # we check kernel command line or files. 
+ if not instance_id: + return False + + dmi_value = util.read_dmi_data(field) + if not dmi_value: + return False + return instance_id.lower() == dmi_value.lower() + + # 'depends' is a list of dependencies (DEP_FILESYSTEM) # ds_list is a list of 2 item lists # ds_list = [ diff --git a/cloudinit/stages.py b/cloudinit/stages.py index dbcf3d55..edad6450 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -140,7 +140,7 @@ class Init(object): ] return initial_dirs - def purge_cache(self, rm_instance_lnk=True): + def purge_cache(self, rm_instance_lnk=False): rm_list = [] rm_list.append(self.paths.boot_finished) if rm_instance_lnk: @@ -238,21 +238,29 @@ class Init(object): cfg_list = self.cfg.get('datasource_list') or [] return (cfg_list, pkg_list) - def _get_data_source(self): + def _get_data_source(self, existing): if self.datasource is not NULL_DATA_SOURCE: return self.datasource with events.ReportEventStack( name="check-cache", - description="attempting to read from cache", + description="attempting to read from cache [%s]" % existing, parent=self.reporter) as myrep: ds = self._restore_from_cache() - if ds: - LOG.debug("Restored from cache, datasource: %s", ds) - myrep.description = "restored from cache" + if ds and existing == "trust": + myrep.description = "restored from cache: %s" % ds + elif ds and existing == "check": + if hasattr(ds, 'check_instance_id') and ds.check_instance_id(): + myrep.description = "restored from checked cache: %s" % ds + else: + myrep.description = "cache invalid in datasource: %s" % ds + ds = None else: myrep.description = "no cache found" + LOG.debug(myrep.description) + if not ds: + util.del_file(self.paths.instance_link) (cfg_list, pkg_list) = self._get_datasources() # Deep copy so that user-data handlers can not modify # (which will affect user-data handlers down the line...) @@ -332,8 +340,8 @@ class Init(object): self._reset() return iid - def fetch(self): - return self._get_data_source() + def fetch(self, existing="check"): + return self._get_data_source(existing=existing) def instancify(self): return self._reflect_cur_instance() -- cgit v1.2.3 From 8968f570787c0889f2c8b363e208e018903b63fa Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 21 Mar 2016 22:22:53 -0400 Subject: add check_instance_id to ConfigDrive --- cloudinit/sources/DataSourceConfigDrive.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index e3916208..6fc9e05b 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -146,6 +146,10 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): return True + def check_instance_id(self): + # quickly (local check only) if self.instance_id is still valid + return sources.instance_id_matches_system_uuid(self.get_instance_id()) + class DataSourceConfigDriveNet(DataSourceConfigDrive): def __init__(self, sys_cfg, distro, paths): -- cgit v1.2.3 From 9c0a2abc8d2c0e390745ddb163f5eae07b20d61d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 22 Mar 2016 03:50:28 -0400 Subject: add code to invoke networking config there is no data source that has a populated network_config() so at this point this doesn't do anything. 
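The search order that the new _find_networking_config in stages.py (below) enforces fits in a few lines. This simplified sketch takes the three candidate configs as plain values; the real code pulls them from the kernel command line, the datasource, and the system config respectively:

    def is_disabled_cfg(cfg):
        return isinstance(cfg, dict) and cfg.get('config') == "disabled"

    def find_networking_config(cmdline_cfg, ds_cfg, sys_cfg, fallback):
        for ncfg in (cmdline_cfg, ds_cfg, sys_cfg):
            if is_disabled_cfg(ncfg):
                return None   # network configuration explicitly disabled
            if ncfg:
                return ncfg   # first populated source wins
        # nothing found anywhere: generate a fallback (e.g. dhcp on one NIC)
        return fallback()

Note the asymmetry: an explicit "disabled" from a higher-priority source short-circuits the whole search, while an empty config merely defers to the next source.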
--- bin/cloud-init | 4 ++++ cloudinit/distros/__init__.py | 2 +- cloudinit/net/__init__.py | 17 +++++++++++++++++ cloudinit/sources/__init__.py | 4 ++++ cloudinit/stages.py | 24 ++++++++++++++++++++++++ 5 files changed, 50 insertions(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/bin/cloud-init b/bin/cloud-init index 63aa765b..8875d2f6 100755 --- a/bin/cloud-init +++ b/bin/cloud-init @@ -263,6 +263,10 @@ def main_init(name, args): return (None, []) else: return (None, ["No instance datasource found."]) + + if args.local: + init.apply_network_config() + # Stage 6 iid = init.instancify() LOG.debug("%s will now be targeting instance id: %s", name, iid) diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 74b484a7..418421b9 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -135,7 +135,7 @@ class Distro(object): return self._bring_up_interfaces(dev_names) return False - def apply_network_config(self, netconfig, bring_up=True): + def apply_network_config(self, netconfig, bring_up=False): # Write it out dev_names = self._write_network_config(netconfig) # Now try to bring them up diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 3cf99604..799cb97e 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -434,4 +434,21 @@ def render_network_state(target, network_state): with open(netrules, 'w+') as f: f.write(render_persistent_net(network_state)) + +def is_disabled_cfg(cfg): + if not cfg or not isinstance(cfg, dict): + return False + return cfg.get('config') == "disabled" + + +def generate_fallback_config(): + # FIXME: add implementation here + return None + + +def read_kernel_cmdline_config(): + # FIXME: add implementation here + return None + + # vi: ts=4 expandtab syntax=python diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index 28540a7b..c63464b2 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -221,6 +221,10 @@ class DataSource(object): # quickly (local check only) if self.instance_id is still return False + @property + def network_config(self): + return None + def normalize_pubkey_data(pubkey_data): keys = [] diff --git a/cloudinit/stages.py b/cloudinit/stages.py index c230ec0d..8e681e29 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -43,6 +43,7 @@ from cloudinit import distros from cloudinit import helpers from cloudinit import importer from cloudinit import log as logging +from cloudinit import net from cloudinit import sources from cloudinit import type_utils from cloudinit import util @@ -567,6 +568,29 @@ class Init(object): # Run the handlers self._do_handlers(user_data_msg, c_handlers_list, frequency) + def _find_networking_config(self): + cmdline_cfg = ('cmdline', net.read_kernel_cmdline_config()) + dscfg = ('ds', None) + if self.datasource and hasattr(self.datasource, 'network_config'): + dscfg = ('ds', self.datasource.network_config) + sys_cfg = ('system_cfg', self.cfg.get('network')) + + for loc, ncfg in (cmdline_cfg, dscfg, sys_cfg): + if net.is_disabled_cfg(ncfg): + LOG.debug("network config disabled by %s", loc) + return None + if ncfg: + return ncfg + return net.generate_fallback_config() + + def apply_network_config(self): + netcfg = self._find_networking_config() + if netcfg is None: + LOG.info("network config is disabled") + return + + return self.distro.apply_network_config(netcfg) + class Modules(object): def __init__(self, init, cfg_files=None, reporter=None): -- cgit v1.2.3 From 
ca00b0f1f8c8a40409328c595d44234bb61c24c4 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 22 Mar 2016 04:49:34 -0400 Subject: make NoCloud work for seeding network. Tested now with the generated fallback config in an lxc container. Had to change to return a config rather than a network state. Also this makes nocloud look in nocloud-net's seed dir. This way it will read the seed and claim the datasource but not do anything other than apply networking and the init_modules early. It is a change in when boothooks would run. May need to change that back. --- cloudinit/net/__init__.py | 20 +++++--------------- cloudinit/sources/DataSourceNoCloud.py | 34 +++++++++++++++++++--------------- cloudinit/stages.py | 1 + 3 files changed, 25 insertions(+), 30 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index b45153f4..63fad2fa 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -448,11 +448,7 @@ def generate_fallback_config(): """Determine which attached net dev is most likely to have a connection and generate network state to run dhcp on that interface""" # by default use eth0 as primary interface - nconf = {'config': {'interfaces': {}, - 'dns': {'search': [], 'nameservers': []}, 'routes': [] - }, - 'version': 1 - } + nconf = {'config': [], 'version': 1} # get list of interfaces that could have connections invalid_interfaces = set(['lo']) @@ -506,21 +502,15 @@ def generate_fallback_config(): if DEFAULT_PRIMARY_INTERFACE in potential_interfaces: name = DEFAULT_PRIMARY_INTERFACE else: - potential_interfaces.sort( - key=lambda x: int(''.join(i for i in x if i in string.digits))) - name = potential_interfaces[0] + name = sorted(potential_interfaces)[0] sysfs_mac = os.path.join(SYS_CLASS_NET, name, 'address') mac = util.load_file(sysfs_mac).strip() target_name = name - # generate net config for interface - nconf['config']['interfaces'][target_name] = { - 'mac_address': mac, 'name': target_name, 'type': 'physical', - 'mode': 'manual', 'inet': 'inet', - 'subnets': [{'type': 'dhcp4'}, {'type': 'dhcp6'}] - } - + nconf['config'].append( + {'type': 'physical', 'name': target_name, + 'mac_address': mac, 'subnets': [{'type': 'dhcp4'}]}) return nconf diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 538df7d9..bd04a6fe 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -36,7 +36,9 @@ class DataSourceNoCloud(sources.DataSource): self.dsmode = 'local' self.seed = None self.cmdline_id = "ds=nocloud" - self.seed_dir = os.path.join(paths.seed_dir, 'nocloud') + self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'), + os.path.join(paths.seed_dir, 'nocloud-net')] + self.seed_dir = None self.supported_seed_starts = ("/", "file://") def __str__(self): @@ -67,15 +69,15 @@ class DataSourceNoCloud(sources.DataSource): pp2d_kwargs = {'required': ['user-data', 'meta-data'], 'optional': ['vendor-data', 'network-config']} - try: - seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs) - found.append(self.seed_dir) - LOG.debug("Using seeded data from %s", self.seed_dir) - except ValueError as e: - pass - - if self.seed_dir in found: - mydata = _merge_new_seed(mydata, seeded) + for path in self.seed_dirs: + try: + seeded = util.pathprefix2dict(path, **pp2d_kwargs) + found.append(path) + LOG.debug("Using seeded data from %s", path) + mydata = _merge_new_seed(mydata, seeded) + break + except ValueError as e:
+ pass # If the datasource config had a 'seedfrom' entry, then that takes # precedence over a 'seedfrom' that was found in a filesystem @@ -188,21 +190,19 @@ class DataSourceNoCloud(sources.DataSource): # if this is the local datasource or 'seedfrom' was used # and the source of the seed was self.dsmode. # Then see if there is network config to apply. + # note this is obsolete network-interfaces style seeding. if self.dsmode in ("local", seeded_network): if mydata['meta-data'].get('network-interfaces'): LOG.debug("Updating network interfaces from %s", self) self.distro.apply_network( mydata['meta-data']['network-interfaces']) - elif mydata.get('network-config'): - LOG.debug("Updating network config from %s", self) - self.distro.apply_network_config(mydata['network-config'], - bring_up=False) if mydata['meta-data']['dsmode'] == self.dsmode: self.seed = ",".join(found) self.metadata = mydata['meta-data'] self.userdata_raw = mydata['user-data'] self.vendordata_raw = mydata['vendor-data'] + self._network_config = mydata['network-config'] return True LOG.debug("%s: not claiming datasource, dsmode=%s", self, @@ -222,6 +222,10 @@ class DataSourceNoCloud(sources.DataSource): return None return quick_id == current + @property + def network_config(self): + return self._network_config + def _quick_read_instance_id(cmdline_id, dirs=None): if dirs is None: @@ -312,7 +316,7 @@ class DataSourceNoCloudNet(DataSourceNoCloud): DataSourceNoCloud.__init__(self, sys_cfg, distro, paths) self.cmdline_id = "ds=nocloud-net" self.supported_seed_starts = ("http://", "https://", "ftp://") - self.seed_dir = os.path.join(paths.seed_dir, 'nocloud-net') + self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud-net')] self.dsmode = "net" diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 8e681e29..73090025 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -589,6 +589,7 @@ class Init(object): LOG.info("network config is disabled") return + LOG.info("Applying configuration: %s", netcfg) return self.distro.apply_network_config(netcfg) -- cgit v1.2.3 From 4445b881380a39a56490d8a8f9e07bba4540ec62 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 22 Mar 2016 05:39:58 -0400 Subject: fix quick_read_instance_id in nocloud for seed_dirs change --- cloudinit/sources/DataSourceNoCloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index bd04a6fe..afd08935 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -217,7 +217,7 @@ class DataSourceNoCloud(sources.DataSource): return None quick_id = _quick_read_instance_id(cmdline_id=self.cmdline_id, - dirs=[self.seed_dir]) + dirs=self.seed_dirs) if not quick_id: return None return quick_id == current -- cgit v1.2.3 From b21c2b4326f501032e06b3f85236ced1efa0b309 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Tue, 22 Mar 2016 15:35:13 -0700 Subject: Fixed few other misc issues. Enabled NICS even in failure case. Used util.del_dir() instead of shutil.rmtree. 
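Taken together, those fixes give the customization path a uniform shape: remove the spec directory no matter what, and re-enable NICs on every exit rather than only on success. A condensed, runnable sketch of the control flow in the diff below; parse and apply_net are injected stand-ins for the ConfigFile/NicConfigurator steps, and the status strings are placeholders for the real enum values:

    import os
    import shutil

    def enable_nics(nics):
        print("enabling", nics)          # stub for the real helper

    def set_status(state, event):
        print("status:", state, event)   # stub for set_customization_status

    def run_customization(parse, apply_net, config_path, nics):
        try:
            conf = parse(config_path)
        except Exception:
            set_status("RUNNING", "CUSTOMIZE_FAILED")
            enable_nics(nics)            # previously skipped on this path
            return False
        finally:
            # replaces shutil.rmtree; util.del_dir in the real code
            shutil.rmtree(os.path.dirname(config_path), ignore_errors=True)
        try:
            apply_net(conf)
        except Exception:
            set_status("RUNNING", "NETWORK_SETUP_FAILED")
            enable_nics(nics)            # previously skipped on this path
            return False
        set_status("DONE", "SUCCESS")
        enable_nics(nics)                # now runs after status is reported
        return True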
--- cloudinit/sources/DataSourceOVF.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index fc12cbb4..ccdd4fd0 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -24,7 +24,6 @@ from xml.dom import minidom import base64 import os -import shutil import re import time @@ -120,10 +119,10 @@ class DataSourceOVF(sources.DataSource): set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED) + enable_nics(nics) return False finally: - dirPath = os.path.dirname(vmwareImcConfigFilePath) - shutil.rmtree(dirPath) + util.del_dir(os.path.dirname(vmwareImcConfigFilePath)) try: LOG.debug("Applying the Network customization") @@ -135,13 +134,14 @@ class DataSourceOVF(sources.DataSource): set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_RUNNING, GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED) + enable_nics(nics) return False vmwarePlatformFound = True - enable_nics(nics) set_customization_status( GuestCustStateEnum.GUESTCUST_STATE_DONE, GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS) + enable_nics(nics) else: np = {'iso': transport_iso9660, 'vmware-guestd': transport_vmware_guestd, } -- cgit v1.2.3 From 2b85dabb802766e0b3b1949d744c8860c0cb838a Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 23 Mar 2016 11:05:22 -0500 Subject: configdata: parse and convert openstack network_data json to network_config --- cloudinit/net/__init__.py | 34 +++-- cloudinit/net/network_state.py | 45 ++++++- cloudinit/sources/DataSourceConfigDrive.py | 137 +++++++++++++++++++++ cloudinit/sources/helpers/openstack.py | 34 ++++- .../unittests/test_datasource/test_configdrive.py | 50 +++++++- 5 files changed, 289 insertions(+), 11 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index ae7b1c04..76cd4e8b 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -336,7 +336,7 @@ def iface_add_attrs(iface): 'index', 'subnets', ] - if iface['type'] not in ['bond', 'bridge']: + if iface['type'] not in ['bond', 'bridge', 'vlan']: ignore_map.append('mac_address') for key, value in iface.items(): @@ -348,19 +348,34 @@ def iface_add_attrs(iface): return content -def render_route(route): - content = "up route add" +def render_route(route, indent=""): + content = "" + up = indent + "post-up route add" + down = indent + "pre-down route del" + eol = " || true\n" mapping = { 'network': '-net', 'netmask': 'netmask', 'gateway': 'gw', 'metric': 'metric', } - for k in ['network', 'netmask', 'gateway', 'metric']: - if k in route: - content += " %s %s" % (mapping[k], route[k]) + if route['network'] == '0.0.0.0' and route['netmask'] == '0.0.0.0': + default_gw = " default gw %s" % route['gateway'] + content += up + default_gw + eol + content += down + default_gw + eol + elif route['network'] == '::' and route['netmask'] == 0: + # ipv6! 
+ default_gw = " -A inet6 default gw %s" % route['gateway'] + content += up + default_gw + eol + content += down + default_gw + eol + else: + route_line = "" + for k in ['network', 'netmask', 'gateway', 'metric']: + if k in route: + route_line += " %s %s" % (mapping[k], route[k]) + content += up + route_line + eol + content += down + route_line + eol - content += '\n' return content @@ -384,6 +399,7 @@ def render_interfaces(network_state): if len(value): content += " dns-{} {}\n".format(dnskey, " ".join(value)) + content += "\n" for iface in sorted(interfaces.values(), key=lambda k: (order[k['type']], k['name'])): content += "auto {name}\n".format(**iface) @@ -409,6 +425,8 @@ def render_interfaces(network_state): content += iface_add_subnet(iface, subnet) content += iface_add_attrs(iface) + for route in subnet.get('routes', []): + content += render_route(route, indent=" ") content += "\n" else: content += "iface {name} {inet} {mode}\n".format(**iface) @@ -419,7 +437,7 @@ def render_interfaces(network_state): content += render_route(route) # global replacements until v2 format - content = content.replace('mac_address', 'hwaddress') + content = content.replace('mac_address', 'hwaddress ether') return content diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index df04c526..e32d2cdf 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -124,6 +124,17 @@ class NetworkState: iface = interfaces.get(command['name'], {}) for param, val in command.get('params', {}).items(): iface.update({param: val}) + + # convert subnet ipv6 netmask to cidr as needed + subnets = command.get('subnets') + if subnets: + for subnet in subnets: + if subnet['type'] == 'static': + if 'netmask' in subnet and ':' in subnet['address']: + subnet['netmask'] = mask2cidr(subnet['netmask']) + for route in subnet.get('routes', []): + if 'netmask' in route: + route['netmask'] = mask2cidr(route['netmask']) iface.update({ 'name': command.get('name'), 'type': command.get('type'), @@ -133,7 +144,7 @@ class NetworkState: 'mtu': command.get('mtu'), 'address': None, 'gateway': None, - 'subnets': command.get('subnets'), + 'subnets': subnets, }) self.network_state['interfaces'].update({command.get('name'): iface}) self.dump_network_state() @@ -144,6 +155,7 @@ class NetworkState: iface eth0.222 inet static address 10.10.10.1 netmask 255.255.255.0 + hwaddress ether BC:76:4E:06:96:B3 vlan-raw-device eth0 ''' required_keys = [ @@ -335,6 +347,37 @@ def cidr2mask(cidr): return ".".join([str(x) for x in mask]) +def ipv4mask2cidr(mask): + if '.' not in mask: + return mask + return sum([bin(int(x)).count('1') for x in mask.split('.')]) + + +def ipv6mask2cidr(mask): + if ':' not in mask: + return mask + + bitCount = [0, 0x8000, 0xc000, 0xe000, 0xf000, 0xf800, 0xfc00, 0xfe00, + 0xff00, 0xff80, 0xffc0, 0xffe0, 0xfff0, 0xfff8, 0xfffc, + 0xfffe, 0xffff] + cidr = 0 + for word in mask.split(':'): + if not word or int(word, 16) == 0: + break + cidr += bitCount.index(int(word, 16)) + + return cidr + + +def mask2cidr(mask): + if ':' in mask: + return ipv6mask2cidr(mask) + elif '.' 
in mask: + return ipv4mask2cidr(mask) + else: + return mask + + if __name__ == '__main__': import sys import random diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 6fc9e05b..d84fab54 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -18,6 +18,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +import copy import os from cloudinit import log as logging @@ -50,6 +51,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): self.seed_dir = os.path.join(paths.seed_dir, 'config_drive') self.version = None self.ec2_metadata = None + self._network_config = None + self.network_json = None self.files = {} def __str__(self): @@ -144,12 +147,27 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): LOG.warn("Invalid content in vendor-data: %s", e) self.vendordata_raw = None + nd = results.get('networkdata') + self.networkdata_pure = nd + try: + self.network_json = openstack.convert_networkdata_json(nd) + except ValueError as e: + LOG.warn("Invalid content in network-data: %s", e) + self.network_json = None + + if self.network_json: + self._network_config = convert_network_data(self.network_json) + return True def check_instance_id(self): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) + @property + def network_config(self): + return self._network_config + class DataSourceConfigDriveNet(DataSourceConfigDrive): def __init__(self, sys_cfg, distro, paths): @@ -287,3 +305,122 @@ datasources = [ # Return a list of data sources that match this set of dependencies def get_datasource_list(depends): return sources.list_from_depends(depends, datasources) + + +# Convert OpenStack ConfigDrive NetworkData json to network_config yaml +def convert_network_data(network_json=None): + """Return a dictionary of network_config by parsing provided + OpenStack ConfigDrive NetworkData json format + + OpenStack network_data.json provides a 3 element dictionary + - "links" (links are network devices, physical or virtual) + - "networks" (networks are ip network configurations for one or more + links) + - services (non-ip services, like dns) + + networks and links are combined via network items referencing specific + links via a 'link_id' which maps to a links 'id' field. + + To convert this format to network_config yaml, we first iterate over the + links and then walk the network list to determine if any of the networks + utilize the current link; if so we generate a subnet entry for the device + + We also need to map network_data.json fields to network_config fields. For + example, the network_data links 'id' field is equivalent to network_config + 'name' field for devices. We apply more of this mapping to the various + link types that we encounter. + + There are additional fields that are populated in the network_data.json + from OpenStack that are not relevant to network_config yaml, so we + enumerate a dictionary of valid keys for network_yaml and apply filtering + to drop these superflous keys from the network_config yaml. 
+ """ + if network_json is None: + return None + + # dict of network_config key for filtering network_json + valid_keys = { + 'physical': [ + 'name', + 'type', + 'mac_address', + 'subnets', + 'params', + ], + 'subnet': [ + 'type', + 'address', + 'netmask', + 'broadcast', + 'metric', + 'gateway', + 'pointopoint', + 'mtu', + 'scope', + 'dns_nameservers', + 'dns_search', + 'routes', + ], + } + + links = network_json.get('links', []) + networks = network_json.get('networks', []) + services = network_json.get('services', []) + + config = [] + for link in links: + subnets = [] + cfg = {k: v for k, v in link.items() + if k in valid_keys['physical']} + cfg.update({'name': link['id']}) + for network in [net for net in networks + if net['link'] == link['id']]: + subnet = {k: v for k, v in network.items() + if k in valid_keys['subnet']} + if 'dhcp' in network['type']: + t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4' + subnet.update({ + 'type': t, + }) + else: + subnet.update({ + 'type': 'static', + 'address': network.get('ip_address'), + }) + subnets.append(subnet) + cfg.update({'subnets': subnets}) + if link['type'] in ['ethernet', 'vif', 'ovs']: + cfg.update({ + 'type': 'physical', + 'mac_address': link['ethernet_mac_address']}) + elif link['type'] in ['bond']: + params = {} + for k, v in link.items(): + if k == 'bond_links': + continue + elif k.startswith('bond'): + params.update({k: v}) + cfg.update({ + 'bond_interfaces': copy.deepcopy(link['bond_links']), + 'params': params, + }) + elif link['type'] in ['vlan']: + cfg.update({ + 'name': "%s.%s" % (link['vlan_link'], + link['vlan_id']), + 'vlan_link': link['vlan_link'], + 'vlan_id': link['vlan_id'], + 'mac_address': link['vlan_mac_address'], + }) + else: + raise ValueError( + 'Unknown network_data link type: %s' % link['type']) + + config.append(cfg) + + for service in services: + cfg = service + cfg.update({'type': 'nameserver'}) + config.append(cfg) + + return {'version': 1, 'config': config} diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index bd93d22f..eb50a7be 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -51,11 +51,13 @@ OS_LATEST = 'latest' OS_FOLSOM = '2012-08-10' OS_GRIZZLY = '2013-04-04' OS_HAVANA = '2013-10-17' +OS_KILO = '2015-10-15' # keep this in chronological order. new supported versions go at the end. OS_VERSIONS = ( OS_FOLSOM, OS_GRIZZLY, OS_HAVANA, + OS_KILO, ) @@ -229,6 +231,11 @@ class BaseReader(object): False, load_json_anytype, ) + files['networkdata'] = ( + self._path_join("openstack", version, 'network_data.json'), + False, + load_json_anytype, + ) return files results = { @@ -334,7 +341,7 @@ class ConfigDriveReader(BaseReader): path = self._path_join(self.base_path, 'openstack') found = [d for d in os.listdir(path) if os.path.isdir(os.path.join(path))] - self._versions = found + self._versions = sorted(found) return self._versions def _read_ec2_metadata(self): @@ -490,3 +497,28 @@ def convert_vendordata_json(data, recurse=True): recurse=False) raise ValueError("vendordata['cloud-init'] cannot be dict") raise ValueError("Unknown data type for vendordata: %s" % type(data)) + + +def convert_networkdata_json(data, recurse=True): + """ data: a loaded json *object* (strings, arrays, dicts). + return something suitable for cloudinit networkdata_raw. 
+ + if data is: + None: return None + string: return string + list: return data + the list is then processed in UserDataProcessor + dict: return convert_networkdata_json(data.get('cloud-init')) + """ + if not data: + return None + if isinstance(data, six.string_types): + return data + if isinstance(data, list): + return copy.deepcopy(data) + if isinstance(data, dict): + if recurse is True: + return convert_networkdata_json(data.get('cloud-init'), + recurse=False) + raise ValueError("networkdata['cloud-init'] cannot be dict") + raise ValueError("Unknown data type for networkdata: %s" % type(data)) diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index bfd787d1..01f8c5ce 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -59,6 +59,34 @@ OSTACK_META = { CONTENT_0 = b'This is contents of /etc/foo.cfg\n' CONTENT_1 = b'# this is /etc/bar/bar.cfg\n' +NETWORK_DATA = { + 'services': [ + {'type': 'dns', 'address': '199.204.44.24'}, + {'type': 'dns', 'address': '199.204.47.54'} + ], + 'links': [ + {'vif_id': '2ecc7709-b3f7-4448-9580-e1ec32d75bbd', + 'ethernet_mac_address': 'fa:16:3e:69:b0:58', + 'type': 'ovs', 'mtu': None, 'id': 'tap2ecc7709-b3'}, + {'vif_id': '2f88d109-5b57-40e6-af32-2472df09dc33', + 'ethernet_mac_address': 'fa:16:3e:d4:57:ad', + 'type': 'ovs', 'mtu': None, 'id': 'tap2f88d109-5b'}, + {'vif_id': '1a5382f8-04c5-4d75-ab98-d666c1ef52cc', + 'ethernet_mac_address': 'fa:16:3e:05:30:fe', + 'type': 'ovs', 'mtu': None, 'id': 'tap1a5382f8-04'} + ], + 'networks': [ + {'link': 'tap2ecc7709-b3', 'type': 'ipv4_dhcp', + 'network_id': '6d6357ac-0f70-4afa-8bd7-c274cc4ea235', + 'id': 'network0'}, + {'link': 'tap2f88d109-5b', 'type': 'ipv4_dhcp', + 'network_id': 'd227a9b3-6960-4d94-8976-ee5788b44f54', + 'id': 'network1'}, + {'link': 'tap1a5382f8-04', 'type': 'ipv4_dhcp', + 'network_id': 'dab2ba57-cae2-4311-a5ed-010b263891f5', + 'id': 'network2'} + ] +} CFG_DRIVE_FILES_V2 = { 'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META), @@ -70,7 +98,11 @@ CFG_DRIVE_FILES_V2 = { 'openstack/content/0000': CONTENT_0, 'openstack/content/0001': CONTENT_1, 'openstack/latest/meta_data.json': json.dumps(OSTACK_META), - 'openstack/latest/user_data': USER_DATA} + 'openstack/latest/user_data': USER_DATA, + 'openstack/latest/network_data.json': json.dumps(NETWORK_DATA), + 'openstack/2015-10-15/meta_data.json': json.dumps(OSTACK_META), + 'openstack/2015-10-15/user_data': USER_DATA, + 'openstack/2015-10-15/network_data.json': json.dumps(NETWORK_DATA)} class TestConfigDriveDataSource(TestCase): @@ -225,6 +257,7 @@ class TestConfigDriveDataSource(TestCase): self.assertEqual(USER_DATA, found['userdata']) self.assertEqual(expected_md, found['metadata']) + self.assertEqual(NETWORK_DATA, found['networkdata']) self.assertEqual(found['files']['/etc/foo.cfg'], CONTENT_0) self.assertEqual(found['files']['/etc/bar/bar.cfg'], CONTENT_1) @@ -321,6 +354,19 @@ class TestConfigDriveDataSource(TestCase): self.assertEqual(myds.get_public_ssh_keys(), [OSTACK_META['public_keys']['mykey']]) + def test_network_data_is_found(self): + """Verify that network_data is present in ds in config-drive-v2.""" + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + myds = cfg_ds_from_dir(self.tmp) + self.assertEqual(myds.network_json, NETWORK_DATA) + + def test_network_config_is_converted(self): + """Verify that network_data is converted and present on ds object.""" + populate_dir(self.tmp, CFG_DRIVE_FILES_V2) + myds = 
cfg_ds_from_dir(self.tmp) + network_config = ds.convert_network_data(NETWORK_DATA) + self.assertEqual(myds.network_config, network_config) + def cfg_ds_from_dir(seed_d): found = ds.read_config_drive(seed_d) @@ -339,6 +385,8 @@ def populate_ds_from_read_config(cfg_ds, source, results): cfg_ds.ec2_metadata = results.get('ec2-metadata') cfg_ds.userdata_raw = results.get('userdata') cfg_ds.version = results.get('version') + cfg_ds.network_json = results.get('networkdata') + cfg_ds._network_config = ds.convert_network_data(cfg_ds.network_json) def populate_dir(seed_dir, files): -- cgit v1.2.3 From 6b79e2c6f9a7342163691be9e785cef1aa642541 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 23 Mar 2016 14:17:10 -0500 Subject: fix openstack versions s/KILO/LIBERY drop networkdata read helper --- cloudinit/sources/DataSourceConfigDrive.py | 2 +- cloudinit/sources/helpers/openstack.py | 29 ++--------------------------- 2 files changed, 3 insertions(+), 28 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index d84fab54..15dddefe 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -150,7 +150,7 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): nd = results.get('networkdata') self.networkdata_pure = nd try: - self.network_json = openstack.convert_networkdata_json(nd) + self.network_json = util.load_json(nd) except ValueError as e: LOG.warn("Invalid content in network-data: %s", e) self.network_json = None diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py index eb50a7be..1aa6bbae 100644 --- a/cloudinit/sources/helpers/openstack.py +++ b/cloudinit/sources/helpers/openstack.py @@ -51,13 +51,13 @@ OS_LATEST = 'latest' OS_FOLSOM = '2012-08-10' OS_GRIZZLY = '2013-04-04' OS_HAVANA = '2013-10-17' -OS_KILO = '2015-10-15' +OS_LIBERTY = '2015-10-15' # keep this in chronological order. new supported versions go at the end. OS_VERSIONS = ( OS_FOLSOM, OS_GRIZZLY, OS_HAVANA, - OS_KILO, + OS_LIBERTY, ) @@ -497,28 +497,3 @@ def convert_vendordata_json(data, recurse=True): recurse=False) raise ValueError("vendordata['cloud-init'] cannot be dict") raise ValueError("Unknown data type for vendordata: %s" % type(data)) - - -def convert_networkdata_json(data, recurse=True): - """ data: a loaded json *object* (strings, arrays, dicts). - return something suitable for cloudinit networkdata_raw. 
- - if data is: - None: return None - string: return string - list: return data - the list is then processed in UserDataProcessor - dict: return convert_networkdata_json(data.get('cloud-init')) - """ - if not data: - return None - if isinstance(data, six.string_types): - return data - if isinstance(data, list): - return copy.deepcopy(data) - if isinstance(data, dict): - if recurse is True: - return convert_networkdata_json(data.get('cloud-init'), - recurse=False) - raise ValueError("networkdata['cloud-init'] cannot be dict") - raise ValueError("Unknown data type for networkdata: %s" % type(data)) -- cgit v1.2.3 From 32e81553907eaba84345252527f208d29151620f Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 23 Mar 2016 16:56:02 -0500 Subject: network_data: add link type 'phys', no need to reload json data --- cloudinit/sources/DataSourceConfigDrive.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 15dddefe..db813f6e 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -147,10 +147,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): LOG.warn("Invalid content in vendor-data: %s", e) self.vendordata_raw = None - nd = results.get('networkdata') - self.networkdata_pure = nd try: - self.network_json = util.load_json(nd) + self.network_json = results.get('networkdata') except ValueError as e: LOG.warn("Invalid content in network-data: %s", e) self.network_json = None @@ -389,7 +387,7 @@ def convert_network_data(network_json=None): }) subnets.append(subnet) cfg.update({'subnets': subnets}) - if link['type'] in ['ethernet', 'vif', 'ovs']: + if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']: cfg.update({ 'type': 'physical', 'mac_address': link['ethernet_mac_address']}) -- cgit v1.2.3 From eb8b2f0e7b777b756a4965ea784ce1354b5c6396 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 24 Mar 2016 12:51:31 -0400 Subject: provide datasource.check_instance_id with access to system config Changing this interface to allow for easy change later. The thing that this will enable is: a.) maas datasource to look at the system config and see if it is configured with the same consumer_key b.) datasource config could allow setting a variable that it would look at. 
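A hypothetical illustration of what the new parameter makes possible, following the MAAS example in (a); none of these config keys or attributes exist in cloud-init today:

    class ExampleDataSource:
        # credential remembered from the boot that created the cache
        cached_consumer_key = "key-from-previous-boot"

        def check_instance_id(self, sys_cfg):
            ds_cfg = sys_cfg.get('datasource', {}).get('MAAS', {})
            return ds_cfg.get('consumer_key') == self.cached_consumer_key

    # caller side, as in the stages.py hunk below:
    #   if hasattr(ds, 'check_instance_id') and ds.check_instance_id(self.cfg):
    #       ...the cached datasource is still valid...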
--- cloudinit/sources/DataSourceAzure.py | 2 +- cloudinit/sources/DataSourceNoCloud.py | 3 ++- cloudinit/sources/DataSourceOpenStack.py | 2 +- cloudinit/sources/__init__.py | 2 +- cloudinit/stages.py | 3 ++- 5 files changed, 7 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 832b3063..698f4cac 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -254,7 +254,7 @@ class DataSourceAzureNet(sources.DataSource): def get_config_obj(self): return self.cfg - def check_instance_id(self): + def check_instance_id(self, sys_cfg): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index afd08935..f786516b 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -209,13 +209,14 @@ class DataSourceNoCloud(sources.DataSource): mydata['meta-data']['dsmode']) return False - def check_instance_id(self): + def check_instance_id(self, sys_cfg): # quickly (local check only) if self.instance_id is still valid # we check kernel command line or files. current = self.get_instance_id() if not current: return None + LOG.info("Hi, I got some system config: %s", sys_cfg) quick_id = _quick_read_instance_id(cmdline_id=self.cmdline_id, dirs=self.seed_dirs) if not quick_id: diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py index 79bb9d63..f7f4590b 100644 --- a/cloudinit/sources/DataSourceOpenStack.py +++ b/cloudinit/sources/DataSourceOpenStack.py @@ -150,7 +150,7 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource): return True - def check_instance_id(self): + def check_instance_id(self, sys_cfg): # quickly (local check only) if self.instance_id is still valid return sources.instance_id_matches_system_uuid(self.get_instance_id()) diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index c63464b2..82cd3553 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -217,7 +217,7 @@ class DataSource(object): def get_package_mirror_info(self): return self.distro.get_package_mirror_info(data_source=self) - def check_instance_id(self): + def check_instance_id(self, sys_cfg): # quickly (local check only) if self.instance_id is still return False diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 8ebbe6a9..5d6b0447 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -223,7 +223,8 @@ class Init(object): if ds and existing == "trust": myrep.description = "restored from cache: %s" % ds elif ds and existing == "check": - if hasattr(ds, 'check_instance_id') and ds.check_instance_id(): + if (hasattr(ds, 'check_instance_id') and + ds.check_instance_id(self.cfg)): myrep.description = "restored from checked cache: %s" % ds else: myrep.description = "cache invalid in datasource: %s" % ds -- cgit v1.2.3 From 5eedd9e6f15a49029e00aca83f863c89fdb6d198 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 24 Mar 2016 13:10:16 -0400 Subject: remove debug code --- cloudinit/sources/DataSourceNoCloud.py | 1 - 1 file changed, 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index f786516b..802d515b 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ 
b/cloudinit/sources/DataSourceNoCloud.py @@ -216,7 +216,6 @@ class DataSourceNoCloud(sources.DataSource): if not current: return None - LOG.info("Hi, I got some system config: %s", sys_cfg) quick_id = _quick_read_instance_id(cmdline_id=self.cmdline_id, dirs=self.seed_dirs) if not quick_id: -- cgit v1.2.3 From 9c0b3fc96fc33107dde8e89b02a63dbfb04e207c Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Thu, 24 Mar 2016 13:41:25 -0500 Subject: fix review comments net: add render_route comment to document why we added || true to route statements DataSourceConfigDrive: Only convert network_json to network_config when caller reads network_config attr. Cache the conversion. --- cloudinit/net/__init__.py | 14 ++++++++++++++ cloudinit/sources/DataSourceConfigDrive.py | 6 +++--- 2 files changed, 17 insertions(+), 3 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 76cd4e8b..2435055b 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -349,6 +349,20 @@ def iface_add_attrs(iface): def render_route(route, indent=""): + """ When rendering routes for an iface, in some cases applying a route + may result in the route command returning non-zero which produces + some confusing output for users manually using ifup/ifdown[1]. To + that end, we will optionally include an '|| true' postfix to each + route line allowing users to work with ifup/ifdown without using + --force option. + + We may at somepoint not want to emit this additional postfix, and + add a 'strict' flag to this function. When called with strict=True, + then we will not append the postfix. + + 1. http://askubuntu.com/questions/168033/ + how-to-set-static-routes-in-ubuntu-server + """ content = "" up = indent + "post-up route add" down = indent + "pre-down route del" diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index db813f6e..14676f97 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -153,9 +153,6 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): LOG.warn("Invalid content in network-data: %s", e) self.network_json = None - if self.network_json: - self._network_config = convert_network_data(self.network_json) - return True def check_instance_id(self): @@ -164,6 +161,9 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource): @property def network_config(self): + if self._network_config is None: + if self.network_json is not None: + self._network_config = convert_network_data(self.network_json) return self._network_config -- cgit v1.2.3 From 6c49afad6134c5094c5e21784e76735faf510a29 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 24 Mar 2016 17:11:26 -0400 Subject: some final changes a.) do not write systemd link files if we do not have a mac address. the check is updated to check for value rather than just presense (ie, 'mac_address': None) b.) DataSourceNoCloudNet: search in the nocloud seed dir this is important because NoCloud if dsmode is Net will look only would pass by, expecting NoCloudNet to pick it up but NoCloudNet would not look in /var/lib/cloud/seed/nocloud and thus skip it. c.) support the disabling of network configuration via /var/lib/cloud/data/upgraded-network This is what the package upgrader is writing. 
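Point (a) is worth spelling out: the old code tested key presence, but interface state can legitimately carry 'mac_address': None, and that must not produce a .link file. In miniature:

    iface = {'type': 'physical', 'name': 'eth0', 'mac_address': None}

    assert 'mac_address' in iface        # old check passes: bogus file written
    assert not iface.get('mac_address')  # new check rejects it: file skipped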
--- cloudinit/net/__init__.py | 4 ++-- cloudinit/sources/DataSourceNoCloud.py | 1 - cloudinit/stages.py | 5 +++++ 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 57beb837..40929c6e 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -407,7 +407,7 @@ def render_persistent_net(network_state): for iface in interfaces.values(): # for physical interfaces write out a persist net udev rule if iface['type'] == 'physical' and \ - 'name' in iface and 'mac_address' in iface: + 'name' in iface and iface.get('mac_address'): content += generate_udev_rule(iface['name'], iface['mac_address']) @@ -598,7 +598,7 @@ def render_systemd_links(target, network_state, interfaces = network_state.get('interfaces') for iface in interfaces.values(): if (iface['type'] == 'physical' and 'name' in iface and - 'mac_address' in iface): + iface.get('mac_address')): fname = fp_prefix + iface['name'] + ".link" with open(fname, "w") as fp: fp.write("\n".join([ diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 802d515b..c2fba4d2 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -316,7 +316,6 @@ class DataSourceNoCloudNet(DataSourceNoCloud): DataSourceNoCloud.__init__(self, sys_cfg, distro, paths) self.cmdline_id = "ds=nocloud-net" self.supported_seed_starts = ("http://", "https://", "ftp://") - self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud-net')] self.dsmode = "net" diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 5d6b0447..143a4fc9 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -570,6 +570,11 @@ class Init(object): self._do_handlers(user_data_msg, c_handlers_list, frequency) def _find_networking_config(self): + disable_file = os.path.join( + self.paths.get_cpath('data'), 'upgraded-network') + if os.path.exists(disable_file): + return (None, disable_file) + cmdline_cfg = ('cmdline', net.read_kernel_cmdline_config()) dscfg = ('ds', None) if self.datasource and hasattr(self.datasource, 'network_config'): -- cgit v1.2.3 From 20cc8113dde9e6849e8a692aea64cf81a266406d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 24 Mar 2016 17:29:35 -0400 Subject: pyflakes --- cloudinit/sources/DataSourceConfigDrive.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 14676f97..3fa62ef3 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -382,8 +382,8 @@ def convert_network_data(network_json=None): }) else: subnet.update({ - 'type': 'static', - 'address': network.get('ip_address'), + 'type': 'static', + 'address': network.get('ip_address'), }) subnets.append(subnet) cfg.update({'subnets': subnets}) @@ -412,7 +412,7 @@ def convert_network_data(network_json=None): }) else: raise ValueError( - 'Unknown network_data link type: %s' % link['type']) + 'Unknown network_data link type: %s' % link['type']) config.append(cfg) -- cgit v1.2.3 From 4e3ec3a040adf1e5a44aafa1f7276fe83c3329ad Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Tue, 29 Mar 2016 11:38:47 -0700 Subject: Added a comment about /var/run/vmware-imc directory. 
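For context on why the comment matters: DataSourceOVF does not read the file directly, it waits for it, since the daemon copies cust.cfg in asynchronously after power-on. A minimal sketch of such a wait loop; the real implementation is wait_for_imc_cfg_file in DataSourceOVF.py, and the timeout and poll interval here are illustrative rather than the actual defaults:

    import os
    import time

    def wait_for_file(dirpath, filename, maxwait=60, naplen=5):
        waited = 0
        while waited < maxwait:
            path = os.path.join(dirpath, filename)
            if os.path.exists(path):
                return path
            time.sleep(naplen)
            waited += naplen
        return None  # caller treats None as "no IMC configuration present"

    # wait_for_file("/var/run/vmware-imc", "cust.cfg")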
--- cloudinit/sources/DataSourceOVF.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index ccdd4fd0..2a6cd050 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -91,6 +91,10 @@ class DataSourceOVF(sources.DataSource): deployPkgPluginPath = search_file("/usr/lib/open-vm-tools", "libdeployPkgPlugin.so") if deployPkgPluginPath: + # When the VM is powered on, the "VMware Tools" daemon + # copies the customization specification file to + # /var/run/vmware-imc directory. cloud-init code needs + # to search for the file in that directory. vmwareImcConfigFilePath = util.log_time( logfunc=LOG.debug, msg="waiting for configuration file", -- cgit v1.2.3
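As a closing illustration of the ConfigDrive work earlier in this series: fed through convert_network_data(), the NETWORK_DATA fixture from the unit tests should come out roughly as below. This was derived by hand from the conversion code, so treat it as illustrative rather than authoritative:

    {'version': 1,
     'config': [
         {'type': 'physical', 'name': 'tap2ecc7709-b3',
          'mac_address': 'fa:16:3e:69:b0:58',
          'subnets': [{'type': 'dhcp4'}]},
         # ...one such entry per remaining link...
         {'type': 'nameserver', 'address': '199.204.44.24'},
         {'type': 'nameserver', 'address': '199.204.47.54'}]}

Each ipv4_dhcp network becomes a dhcp4 subnet on its link, link fields outside the whitelist (vif_id, mtu) are filtered out, and each dns service is relabeled as a nameserver entry.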