From a110e483e8644ab73e69853ea11b6c4c6cfa04b6 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Wed, 6 Dec 2017 16:30:22 -0600 Subject: pylint: Update pylint to 1.7.1, run on tests/ and tools and fix complaints. The motivation for this is that a.) 1.7.1 runs with python 3.6 (bionic) b.) we want to run pylint on tests/ and tools for the same reasons that we want to run it on cloudinit/ The changes are described below. - Update tox.ini to invoke pylint v1.7.1. - Modify .pylintrc generated-members ignore mocked object members (m_.*) - Replace "dangerous" params defaulting to {} - Fix up cloud_tests use of platforms - Cast some instance objects to with dict() - Handle python2.7 vs 3+ ConfigParser use of readfp (deprecated) - Update use of assertEqual(, value) to assert(value) - replace depricated assertRegexp -> assertRegex - Remove useless test-class calls to super class - Assign class property accessors a result and use it - Fix missing class member in CepkoResultTests - Fix Cheetah test import --- tools/hacking.py | 172 ----------------------------------------------------- tools/make-mime.py | 2 +- tools/mock-meta.py | 45 +++++++------- 3 files changed, 22 insertions(+), 197 deletions(-) delete mode 100755 tools/hacking.py (limited to 'tools') diff --git a/tools/hacking.py b/tools/hacking.py deleted file mode 100755 index e6a05136..00000000 --- a/tools/hacking.py +++ /dev/null @@ -1,172 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2012, Cloudscaling -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""cloudinit HACKING file compliance testing (based off of nova hacking.py) - -built on top of pep8.py -""" - -import inspect -import logging -import re -import sys - -import pep8 - -# Don't need this for testing -logging.disable('LOG') - -# N1xx comments -# N2xx except -# N3xx imports -# N4xx docstrings -# N[5-9]XX (future use) - -DOCSTRING_TRIPLE = ['"""', "'''"] -VERBOSE_MISSING_IMPORT = False -_missingImport = set([]) - - -def import_normalize(line): - # convert "from x import y" to "import x.y" - # handle "from x import y as z" to "import x.y as z" - split_line = line.split() - if (line.startswith("from ") and "," not in line and - split_line[2] == "import" and split_line[3] != "*" and - split_line[1] != "__future__" and - (len(split_line) == 4 or (len(split_line) == 6 and - split_line[4] == "as"))): - return "import %s.%s" % (split_line[1], split_line[3]) - else: - return line - - -def cloud_import_alphabetical(physical_line, line_number, lines): - """Check for imports in alphabetical order. 
- - HACKING guide recommendation for imports: - imports in human alphabetical order - N306 - """ - # handle import x - # use .lower since capitalization shouldn't dictate order - split_line = import_normalize(physical_line.strip()).lower().split() - split_previous = import_normalize(lines[line_number - 2]) - split_previous = split_previous.strip().lower().split() - # with or without "as y" - length = [2, 4] - if (len(split_line) in length and len(split_previous) in length and - split_line[0] == "import" and split_previous[0] == "import"): - if split_line[1] < split_previous[1]: - return (0, "N306: imports not in alphabetical order (%s, %s)" - % (split_previous[1], split_line[1])) - - -def cloud_docstring_start_space(physical_line): - """Check for docstring not start with space. - - HACKING guide recommendation for docstring: - Docstring should not start with space - N401 - """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - if (pos != -1 and len(physical_line) > pos + 1): - if (physical_line[pos + 3] == ' '): - return (pos, - "N401: one line docstring should not start with a space") - - -def cloud_todo_format(physical_line): - """Check for 'TODO()'. - - HACKING guide recommendation for TODO: - Include your name with TODOs as in "#TODO(termie)" - N101 - """ - pos = physical_line.find('TODO') - pos1 = physical_line.find('TODO(') - pos2 = physical_line.find('#') # make sure it's a comment - if (pos != pos1 and pos2 >= 0 and pos2 < pos): - return pos, "N101: Use TODO(NAME)" - - -def cloud_docstring_one_line(physical_line): - """Check one line docstring end. - - HACKING guide recommendation for one line docstring: - A one line docstring looks like this and ends in a period. - N402 - """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end - if (pos != -1 and end and len(physical_line) > pos + 4): - if (physical_line[-5] != '.'): - return pos, "N402: one line docstring needs a period" - - -def cloud_docstring_multiline_end(physical_line): - """Check multi line docstring end. - - HACKING guide recommendation for docstring: - Docstring should end on a new line - N403 - """ - pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start - if (pos != -1 and len(physical_line) == pos): - print(physical_line) - if (physical_line[pos + 3] == ' '): - return (pos, "N403: multi line docstring end on new line") - - -current_file = "" - - -def readlines(filename): - """Record the current file being tested.""" - pep8.current_file = filename - return open(filename).readlines() - - -def add_cloud(): - """Monkey patch pep8 for cloud-init guidelines. - - Look for functions that start with cloud_ - and add them to pep8 module. 
- - Assumes you know how to write pep8.py checks - """ - for name, function in globals().items(): - if not inspect.isfunction(function): - continue - if name.startswith("cloud_"): - exec("pep8.%s = %s" % (name, name)) - - -if __name__ == "__main__": - # NOVA based 'hacking.py' error codes start with an N - pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}') - add_cloud() - pep8.current_file = current_file - pep8.readlines = readlines - try: - pep8._main() - finally: - if len(_missingImport) > 0: - sys.stderr.write( - "%i imports missing in this test environment\n" % - len(_missingImport)) - -# vi: ts=4 expandtab diff --git a/tools/make-mime.py b/tools/make-mime.py index f6a72044..d321479b 100755 --- a/tools/make-mime.py +++ b/tools/make-mime.py @@ -23,7 +23,7 @@ def file_content_type(text): filename, content_type = text.split(":", 1) return (open(filename, 'r'), filename, content_type.strip()) except ValueError: - raise argparse.ArgumentError("Invalid value for %r" % (text)) + raise argparse.ArgumentError(text, "Invalid value for %r" % (text)) def main(): diff --git a/tools/mock-meta.py b/tools/mock-meta.py index a5d14ab7..724f7fc4 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -17,6 +17,7 @@ Then: ec2metadata --instance-id """ +import argparse import functools import json import logging @@ -27,8 +28,6 @@ import string import sys import yaml -from optparse import OptionParser - try: from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler import httplib as hclient @@ -415,29 +414,27 @@ def setup_logging(log_level, fmt='%(levelname)s: @%(name)s : %(message)s'): def extract_opts(): - parser = OptionParser() - parser.add_option("-p", "--port", dest="port", action="store", type=int, - default=80, metavar="PORT", - help=("port from which to serve traffic" - " (default: %default)")) - parser.add_option("-a", "--addr", dest="address", action="store", type=str, - default='::', metavar="ADDRESS", - help=("address from which to serve traffic" - " (default: %default)")) - parser.add_option("-f", '--user-data-file', dest='user_data_file', - action='store', metavar='FILE', - help=("user data filename to serve back to" - "incoming requests")) - (options, args) = parser.parse_args() - out = dict() - out['extra'] = args - out['port'] = options.port - out['user_data_file'] = None - out['address'] = options.address - if options.user_data_file: - if not os.path.isfile(options.user_data_file): + parser = argparse.ArgumentParser() + parser.add_argument("-p", "--port", dest="port", action="store", type=int, + default=80, metavar="PORT", + help=("port from which to serve traffic" + " (default: %default)")) + parser.add_argument("-a", "--addr", dest="address", action="store", + type=str, default='::', metavar="ADDRESS", + help=("address from which to serve traffic" + " (default: %default)")) + parser.add_argument("-f", '--user-data-file', dest='user_data_file', + action='store', metavar='FILE', + help=("user data filename to serve back to" + "incoming requests")) + parser.add_argument('extra', nargs='*') + args = parser.parse_args() + out = {'port': args.port, 'address': args.address, 'extra': args.extra, + 'user_data_file': None} + if args.user_data_file: + if not os.path.isfile(args.user_data_file): parser.error("Option -f specified a non-existent file") - with open(options.user_data_file, 'rb') as fh: + with open(args.user_data_file, 'rb') as fh: out['user_data_file'] = fh.read() return out -- cgit v1.2.3 From a5dc0f425facf404344fb7baaf2b9136df143ecf Mon Sep 17 00:00:00 2001 From: Scott Moser 
Date: Wed, 6 Dec 2017 17:26:52 -0700 Subject: OVF: improve ds-identify to support finding OVF iso transport. Previously the OVF transport would not be identified except for when config files set 'ovf_vmware_guest_customization'. It would also return DS_MAYBE almost always. The change here is to add support to ds-identify for storing the iso9660 filesystems that it finds (ISO9660_DEVS). Then the OVF check will check that the iso9660 filesystem has ovf-env.xml on it. The least wonderful part of this is that the check is done by 'grep' for case insensitive ovf-env.xml. Future improvement would be to identify VMware's OVF by label or UUID so we could avoid the grep. LP: #1731868 --- tests/unittests/test_ds_identify.py | 85 ++++++++++++++++++++++++++- tools/ds-identify | 112 +++++++++++++++++++++++++----------- 2 files changed, 161 insertions(+), 36 deletions(-) (limited to 'tools') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 7a920d42..3f1a6712 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -32,6 +32,7 @@ POLICY_FOUND_OR_MAYBE = "search,found=all,maybe=all,notfound=disabled" DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=enabled" DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=disabled" DI_EC2_STRICT_ID_DEFAULT = "true" +OVF_MATCH_STRING = 'http://schemas.dmtf.org/ovf/environment/1' SHELL_MOCK_TMPL = """\ %(name)s() { @@ -55,6 +56,7 @@ P_SEED_DIR = "var/lib/cloud/seed" P_DSID_CFG = "etc/cloud/ds-identify.cfg" MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} +MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0} MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0} @@ -296,6 +298,48 @@ class TestDsIdentify(CiTestCase): data, RC_FOUND, dslist=['OpenStack', 'None']) self.assertIn("check for 'OpenStack' returned maybe", err) + def test_default_ovf_is_found(self): + """OVF is identified found when ovf/ovf-env.xml seed file exists.""" + self._test_ds_found('OVF-seed') + + def test_default_ovf_with_detect_virt_none_not_found(self): + """OVF identifies not found when detect_virt returns "none".""" + self._check_via_dict( + {'ds': 'OVF'}, rc=RC_NOT_FOUND, policy_dmi="disabled") + + def test_default_ovf_returns_not_found_on_azure(self): + """OVF datasource won't be found as false positive on Azure.""" + ovfonazure = copy.deepcopy(VALID_CFG['OVF']) + # Set azure asset tag to assert OVF content not found + ovfonazure['files'][P_CHASSIS_ASSET_TAG] = ( + '7783-7084-3265-9085-8269-3286-77\n') + self._check_via_dict( + ovfonazure, RC_FOUND, dslist=['Azure', DS_NONE]) + + def test_ovf_on_vmware_iso_found_by_cdrom_with_ovf_schema_match(self): + """OVF is identified when iso9660 cdrom path contains ovf schema.""" + self._test_ds_found('OVF') + + def test_ovf_on_vmware_iso_found_when_vmware_customization(self): + """OVF is identified when vmware customization is enabled.""" + self._test_ds_found('OVF-vmware-customization') + + def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self): + """OVF is identified when iso9660 cdrom label has ovf-transport.""" + ovf_cdrom_by_label = copy.deepcopy(VALID_CFG['OVF']) + # Unset matching cdrom ovf schema content + ovf_cdrom_by_label['files']['dev/sr0'] = 'No content match' + self._check_via_dict( + ovf_cdrom_by_label, rc=RC_NOT_FOUND, policy_dmi="disabled") + + # Add recognized labels + for valid_fs_label in ['ovf-transport', 'OVF-TRANSPORT']: + ovf_cdrom_by_label['mocks'][0]['out'] = 
blkid_out([ + {'DEVNAME': 'sr0', 'TYPE': 'iso9660', + 'LABEL': valid_fs_label}]) + self._check_via_dict( + ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE]) + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" @@ -305,7 +349,9 @@ def blkid_out(disks=None): for disk in disks: if not disk["DEVNAME"].startswith("/dev/"): disk["DEVNAME"] = "/dev/" + disk["DEVNAME"] - for key in disk: + # devname needs to be first. + lines.append("%s=%s" % ("DEVNAME", disk["DEVNAME"])) + for key in [d for d in disk if d != "DEVNAME"]: lines.append("%s=%s" % (key, disk[key])) lines.append("") return '\n'.join(lines) @@ -383,6 +429,43 @@ VALID_CFG = { 'policy_dmi': POLICY_FOUND_ONLY, 'policy_no_dmi': POLICY_FOUND_ONLY, }, + 'OVF-seed': { + 'ds': 'OVF', + 'files': { + os.path.join(P_SEED_DIR, 'ovf', 'ovf-env.xml'): 'present\n', + } + }, + 'OVF-vmware-customization': { + 'ds': 'OVF', + 'mocks': [ + # Include a mockes iso9660 potential, even though content not ovf + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''}]) + }, + MOCK_VIRT_IS_VMWARE, + ], + 'files': { + 'dev/sr0': 'no match', + # Setup vmware customization enabled + 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so': 'here', + 'etc/cloud/cloud.cfg': 'disable_vmware_customization: false\n', + } + }, + 'OVF': { + 'ds': 'OVF', + 'mocks': [ + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''}]) + }, + MOCK_VIRT_IS_VMWARE, + ], + 'files': { + 'dev/sr0': 'pretend ovf iso has ' + OVF_MATCH_STRING + '\n', + } + }, 'ConfigDrive': { 'ds': 'ConfigDrive', 'mocks': [ diff --git a/tools/ds-identify b/tools/ds-identify index ee5e05a4..4c59d7bc 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -83,6 +83,7 @@ _DI_LOGGED="" # set DI_MAIN='noop' in environment to source this file with no main called. DI_MAIN=${DI_MAIN:-main} +DI_BLKID_OUTPUT="" DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}" DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}" DI_DMI_CHASSIS_ASSET_TAG="" @@ -91,6 +92,7 @@ DI_DMI_SYS_VENDOR="" DI_DMI_PRODUCT_SERIAL="" DI_DMI_PRODUCT_UUID="" DI_FS_LABELS="" +DI_ISO9660_DEVS="" DI_KERNEL_CMDLINE="" DI_VIRT="" DI_PID_1_PRODUCT_NAME="" @@ -181,32 +183,43 @@ block_dev_with_label() { return 0 } -read_fs_labels() { - cached "${DI_FS_LABELS}" && return 0 +read_fs_info() { + cached "${DI_BLKID_OUTPUT}" && return 0 # do not rely on links in /dev/disk which might not be present yet. # note that older blkid versions do not report DEVNAME in 'export' output. - local out="" ret=0 oifs="$IFS" line="" delim="," - local labels="" if is_container; then # blkid will in a container, or at least currently in lxd # not provide useful information. DI_FS_LABELS="$UNAVAILABLE:container" - else - out=$(blkid -c /dev/null -o export) || { - ret=$? - error "failed running [$ret]: blkid -c /dev/null -o export" - return $ret - } - IFS="$CR" - set -- $out - IFS="$oifs" - for line in "$@"; do - case "${line}" in - LABEL=*) labels="${labels}${line#LABEL=}${delim}";; - esac - done - DI_FS_LABELS="${labels%${delim}}" + DI_ISO9660_DEVS="$UNAVAILABLE:container" + return fi + local oifs="$IFS" line="" delim="," + local ret=0 out="" labels="" dev="" label="" ftype="" isodevs="" + out=$(blkid -c /dev/null -o export) || { + ret=$? 
+ error "failed running [$ret]: blkid -c /dev/null -o export" + DI_FS_LABELS="$UNAVAILABLE:error" + DI_ISO9660_DEVS="$UNAVAILABLE:error" + return $ret + } + IFS="$CR" + set -- $out + IFS="$oifs" + for line in "$@" ""; do + case "${line}" in + DEVNAME=*) dev=${line#DEVNAME=};; + LABEL=*) label="${line#LABEL=}"; + labels="${labels}${line#LABEL=}${delim}";; + TYPE=*) ftype=${line#TYPE=};; + "") if [ "$ftype" = "iso9660" ]; then + isodevs="${isodevs} ${dev}=$label" + fi + ftype=""; devname=""; label=""; + esac + done + DI_FS_LABELS="${labels%${delim}}" + DI_ISO9660_DEVS="${isodevs# }" } cached() { @@ -214,10 +227,6 @@ cached() { } -has_cdrom() { - [ -e "${PATH_ROOT}/dev/cdrom" ] -} - detect_virt() { local virt="${UNAVAILABLE}" r="" out="" if [ -d /run/systemd ]; then @@ -621,14 +630,13 @@ ovf_vmware_guest_customization() { [ "${DI_VIRT}" = "vmware" ] || return 1 # we have to have the plugin to do vmware customization - local found="" pkg="" pre="/usr/lib" + local found="" pkg="" pre="${PATH_ROOT}/usr/lib" for pkg in vmware-tools open-vm-tools; do if [ -f "$pre/$pkg/plugins/vmsvc/libdeployPkgPlugin.so" ]; then found="$pkg"; break; fi done [ -n "$found" ] || return 1 - # vmware customization is disabled by default # (disable_vmware_customization=true). If it is set to false, then # user has requested customization. @@ -644,20 +652,55 @@ ovf_vmware_guest_customization() { return 1 } +is_cdrom_ovf() { + local dev="$1" label="$2" + # skip devices that don't look like cdrom paths. + case "$dev" in + /dev/sr[0-9]|/dev/hd[a-z]) :;; + *) debug 1 "skipping iso dev $d" + return 1;; + esac + + # fast path known 'OVF' labels + [ "$label" = "OVF-TRANSPORT" -o "$label" = "ovf-transport" ] && return 0 + + # explicitly skip known labels of other types. rd_rdfe is azure. + case "$label" in + config-2|rd_rdfe_stable*) return 1;; + esac + + local idstr="http://schemas.dmtf.org/ovf/environment/1" + grep --quiet --ignore-case "$idstr" "${PATH_ROOT}$dev" +} + dscheck_OVF() { - local p="" check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}" + [ "${DI_VIRT}" = "none" ] && return ${DS_NOT_FOUND} + + # Azure provides ovf. Skip false positive by dis-allowing. + is_azure_chassis && return $DS_NOT_FOUND + + local isodevs="${DI_ISO9660_DEVS}" + case "$isodevs" in + ""|$UNAVAILABLE:*) return ${DS_NOT_FOUND};; + esac + + # DI_ISO9660_DEVS is =label, like /dev/sr0=OVF-TRANSPORT + for tok in $isodevs; do + is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return $DS_FOUND + done + if ovf_vmware_guest_customization; then return ${DS_FOUND} fi - has_cdrom || return ${DS_NOT_FOUND} + return ${DS_NOT_FOUND} +} - # FIXME: currently just return maybe if there is a cdrom - # ovf iso9660 transport does not specify an fs label. 
- # better would be to check if - return ${DS_MAYBE} +is_azure_chassis() { + local azure_chassis="7783-7084-3265-9085-8269-3286-77" + dmi_chassis_asset_tag_matches "${azure_chassis}" } dscheck_Azure() { @@ -667,8 +710,7 @@ dscheck_Azure() { # UUID="112D211272645f72" LABEL="rd_rdfe_stable.161212-1209" # TYPE="udf">/dev/sr0 # - local azure_chassis="7783-7084-3265-9085-8269-3286-77" - dmi_chassis_asset_tag_matches "${azure_chassis}" && return $DS_FOUND + is_azure_chassis && return $DS_FOUND check_seed_dir azure ovf-env.xml && return ${DS_FOUND} [ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND} @@ -930,7 +972,7 @@ collect_info() { read_dmi_product_name read_dmi_product_serial read_dmi_product_uuid - read_fs_labels + read_fs_info } print_info() { @@ -942,7 +984,7 @@ _print_info() { local n="" v="" vars="" vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME DMI_CHASSIS_ASSET_TAG" - vars="$vars FS_LABELS KERNEL_CMDLINE VIRT" + vars="$vars FS_LABELS ISO9660_DEVS KERNEL_CMDLINE VIRT" vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION" vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" vars="$vars DSNAME DSLIST" -- cgit v1.2.3 From a30a3bb5baec4da1d8f91385849e9b5b826678bf Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 12 Dec 2017 11:41:26 -0700 Subject: ds-identify: failure in NoCloud due to unset variable usage. The previous OVF datasource change added a debug message that referenced an un-used variable. The failure path would be triggered if an image was booted with a iso9660 filesystem attached to a device that was not a cdrom. A unit test is added for the specific failure found. Additional safety to avoid 'cidata' labels is also added to the OVF checker. 
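As an illustrative sketch only (not part of this patch), the failure mode
can be reproduced in a plain shell, assuming ds-identify runs its checks
under "set -u" so that expanding the unset "$d" is fatal rather than
expanding to an empty string:

  $ sh -c 'set -u; dev=/dev/vdb; echo "skipping iso dev $d"'

The shell aborts with an "unbound variable" / "parameter not set" style
error, so the ds-identify run exits early and never reports NoCloud as
found.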
LP: #1737704 --- tests/unittests/test_ds_identify.py | 25 +++++++++++++++++++++++++ tools/ds-identify | 4 ++-- 2 files changed, 27 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 3f1a6712..c9234edd 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -27,6 +27,14 @@ TYPE=ext4 PARTUUID=30c65c77-e07d-4039-b2fb-88b1fb5fa1fc """ +# this is a Ubuntu 18.04 disk.img output (dual uefi and bios bootable) +BLKID_UEFI_UBUNTU = [ + {'DEVNAME': 'vda1', 'TYPE': 'ext4', 'PARTUUID': uuid4(), 'UUID': uuid4()}, + {'DEVNAME': 'vda14', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vda15', 'TYPE': 'vfat', 'LABEL': 'UEFI', 'PARTUUID': uuid4(), + 'UUID': '5F55-129B'}] + + POLICY_FOUND_ONLY = "search,found=all,maybe=none,notfound=disabled" POLICY_FOUND_OR_MAYBE = "search,found=all,maybe=all,notfound=disabled" DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=enabled" @@ -340,6 +348,10 @@ class TestDsIdentify(CiTestCase): self._check_via_dict( ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE]) + def test_default_nocloud_as_vdb_iso9660(self): + """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" + self._test_ds_found('NoCloud') + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" @@ -422,6 +434,19 @@ VALID_CFG = { 'files': {P_PRODUCT_SERIAL: 'GoogleCloud-8f2e88f\n'}, 'mocks': [MOCK_VIRT_IS_KVM], }, + 'NoCloud': { + 'ds': 'NoCloud', + 'mocks': [ + MOCK_VIRT_IS_KVM, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + BLKID_UEFI_UBUNTU + + [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'cidata'}])}, + ], + 'files': { + 'dev/vdb': 'pretend iso content for cidata\n', + } + }, 'OpenStack': { 'ds': 'OpenStack', 'files': {P_PRODUCT_NAME: 'OpenStack Nova\n'}, diff --git a/tools/ds-identify b/tools/ds-identify index 4c59d7bc..5893a761 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -657,7 +657,7 @@ is_cdrom_ovf() { # skip devices that don't look like cdrom paths. case "$dev" in /dev/sr[0-9]|/dev/hd[a-z]) :;; - *) debug 1 "skipping iso dev $d" + *) debug 1 "skipping iso dev $dev" return 1;; esac @@ -666,7 +666,7 @@ is_cdrom_ovf() { # explicitly skip known labels of other types. rd_rdfe is azure. case "$label" in - config-2|rd_rdfe_stable*) return 1;; + config-2|rd_rdfe_stable*|cidata) return 1;; esac local idstr="http://schemas.dmtf.org/ovf/environment/1" -- cgit v1.2.3 From eb70975eaf37cf9549949f72e7647addb81a52ac Mon Sep 17 00:00:00 2001 From: James Penick Date: Tue, 23 Jan 2018 14:22:54 -0700 Subject: Recognize uppercase vfat disk labels New mkfs.vfat and fatlabel tools included in the dosfsutils package no longer support creating vfat disks with lowercase labels. They silently default to an all uppercase label eg CONFIG-2 instead of config-2. This change makes cloud-init handle either upper or lower case. 
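For illustration only (the device name and output below are hypothetical),
a config drive created by the newer tools shows up in blkid's export
output with the uppercase label, which both the Python datasource and
ds-identify now accept:

  $ blkid -o export /dev/vdb
  DEVNAME=/dev/vdb
  LABEL=CONFIG-2
  TYPE=vfat

find_candidate_devs() queries both spellings (LABEL=config-2 and
LABEL=CONFIG-2), and check_configdrive_v2 in ds-identify falls back to
has_fs_with_label "CONFIG-2" when the lowercase label is not present.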
LP: #1598783 --- cloudinit/sources/DataSourceConfigDrive.py | 4 ++-- tests/unittests/test_datasource/test_configdrive.py | 6 ++++++ tests/unittests/test_ds_identify.py | 17 +++++++++++++++++ tools/ds-identify | 4 +++- 4 files changed, 28 insertions(+), 3 deletions(-) (limited to 'tools') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index 870b3688..b8db6267 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -25,7 +25,7 @@ DEFAULT_METADATA = { "instance-id": DEFAULT_IID, } FS_TYPES = ('vfat', 'iso9660') -LABEL_TYPES = ('config-2',) +LABEL_TYPES = ('config-2', 'CONFIG-2') POSSIBLE_MOUNTS = ('sr', 'cd') OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS for i in range(0, 2))) @@ -224,7 +224,7 @@ def find_candidate_devs(probe_optical=True): config drive v2: Disk should be: * either vfat or iso9660 formated - * labeled with 'config-2' + * labeled with 'config-2' or 'CONFIG-2' """ # query optical drive to get it in blkid cache for 2.6 kernels if probe_optical: diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py index 6ef5a35c..68400f22 100644 --- a/tests/unittests/test_datasource/test_configdrive.py +++ b/tests/unittests/test_datasource/test_configdrive.py @@ -458,6 +458,12 @@ class TestConfigDriveDataSource(CiTestCase): self.assertEqual(["/dev/vdb3"], ds.find_candidate_devs()) + # Verify that uppercase labels are also found. + devs_with_answers = {"TYPE=vfat": [], + "TYPE=iso9660": ["/dev/vdb"], + "LABEL=CONFIG-2": ["/dev/vdb"]} + self.assertEqual(["/dev/vdb"], ds.find_candidate_devs()) + finally: util.find_devs_with = orig_find_devs_with util.is_partition = orig_is_partition diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index c9234edd..ad6c5cf4 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -232,6 +232,11 @@ class TestDsIdentify(CiTestCase): self._test_ds_found('ConfigDrive') return + def test_config_drive_upper(self): + """ConfigDrive datasource has a disk with LABEL=CONFIG-2.""" + self._test_ds_found('ConfigDriveUpper') + return + def test_policy_disabled(self): """A Builtin policy of 'disabled' should return not found. @@ -503,6 +508,18 @@ VALID_CFG = { }, ], }, + 'ConfigDriveUpper': { + 'ds': 'ConfigDrive', + 'mocks': [ + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CONFIG-2'}]) + }, + ], + }, } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index 5893a761..374c3ad1 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -579,6 +579,8 @@ dscheck_NoCloud() { check_configdrive_v2() { if has_fs_with_label "config-2"; then return ${DS_FOUND} + elif has_fs_with_label "CONFIG-2"; then + return ${DS_FOUND} fi # look in /config-drive /seed/config_drive for a directory # openstack/YYYY-MM-DD format with a file meta_data.json @@ -666,7 +668,7 @@ is_cdrom_ovf() { # explicitly skip known labels of other types. rd_rdfe is azure. 
case "$label" in - config-2|rd_rdfe_stable*|cidata) return 1;; + config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;; esac local idstr="http://schemas.dmtf.org/ovf/environment/1" -- cgit v1.2.3 From 30597f28512fafbe25486df5865b628d859486c6 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 24 Jan 2018 10:25:48 -0500 Subject: tools/read-version: Fix read-version when in a git worktree. read-version --json would report bad data when working in a worktree. This is just because in a worktree, .git is not a directory, but rather a metadata file that points to the another path. $ git worktree ../mytree $ cat ../mytree/.git gitdir: /path/to/cloud-init/.git/worktrees/mytree $ rm -Rf ../mytree; git worktree prune --- tools/read-version | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/read-version b/tools/read-version index d9ed30da..3ea9e66e 100755 --- a/tools/read-version +++ b/tools/read-version @@ -45,6 +45,19 @@ def which(program): return None +def is_gitdir(path): + # Return boolean indicating if path is a git tree. + git_meta = os.path.join(path, '.git') + if os.path.isdir(git_meta): + return True + if os.path.exists(git_meta): + # in a git worktree, .git is a file with 'gitdir: x' + with open(git_meta, "rb") as fp: + if b'gitdir:' in fp.read(): + return True + return False + + use_long = '--long' in sys.argv or os.environ.get('CI_RV_LONG') use_tags = '--tags' in sys.argv or os.environ.get('CI_RV_TAGS') output_json = '--json' in sys.argv @@ -52,7 +65,7 @@ output_json = '--json' in sys.argv src_version = ci_version.version_string() version_long = None -if os.path.isdir(os.path.join(_tdir, ".git")) and which("git"): +if is_gitdir(_tdir) and which("git"): flags = [] if use_tags: flags = ['--tags'] -- cgit v1.2.3 From 5e5dc9731f39e8b1df767fbaf850fbbd31355a1a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 25 Jan 2018 12:51:03 -0500 Subject: OVF: Extend well-known labels to include OVFENV. Fujitsu Cloud Service attaches a ovf iso transport with a label 'OVFENV'. This seems to be a reasonable value as a label. While the for bug 1731868 would likely fix cloud-init on fujitsu cloud, this change will find it faster. 
LP: #1698669 --- tests/unittests/test_ds_identify.py | 8 +++++--- tools/ds-identify | 4 +++- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index ad6c5cf4..31cc6223 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -338,7 +338,7 @@ class TestDsIdentify(CiTestCase): self._test_ds_found('OVF-vmware-customization') def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self): - """OVF is identified when iso9660 cdrom label has ovf-transport.""" + """OVF is identified by well-known iso9660 labels.""" ovf_cdrom_by_label = copy.deepcopy(VALID_CFG['OVF']) # Unset matching cdrom ovf schema content ovf_cdrom_by_label['files']['dev/sr0'] = 'No content match' @@ -346,10 +346,12 @@ class TestDsIdentify(CiTestCase): ovf_cdrom_by_label, rc=RC_NOT_FOUND, policy_dmi="disabled") # Add recognized labels - for valid_fs_label in ['ovf-transport', 'OVF-TRANSPORT']: + valid_ovf_labels = ['ovf-transport', 'OVF-TRANSPORT', + "OVFENV", "ovfenv"] + for valid_ovf_label in valid_ovf_labels: ovf_cdrom_by_label['mocks'][0]['out'] = blkid_out([ {'DEVNAME': 'sr0', 'TYPE': 'iso9660', - 'LABEL': valid_fs_label}]) + 'LABEL': valid_ovf_label}]) self._check_via_dict( ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE]) diff --git a/tools/ds-identify b/tools/ds-identify index 374c3ad1..cd268242 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -664,7 +664,9 @@ is_cdrom_ovf() { esac # fast path known 'OVF' labels - [ "$label" = "OVF-TRANSPORT" -o "$label" = "ovf-transport" ] && return 0 + case "$label" in + OVF-TRANSPORT|ovf-transport|OVFENV|ovfenv) return 0;; + esac # explicitly skip known labels of other types. rd_rdfe is azure. case "$label" in -- cgit v1.2.3