From a1ca220d137cf7b3f79b516980a042ec800a8d91 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 9 Feb 2018 17:59:13 -0500 Subject: tools: run-centos: git clone rather than tar. This changes tools/run-centos to collect up your git working directory via 'git' commands rather than just collecting the whole directory. The reason for this is that even a clean tree that has had tox run on it might have up to 400M of data in it. It adds a '--dirty' flag to run-centos to collect up local changes. --- tools/run-centos | 88 ++++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 76 insertions(+), 12 deletions(-) (limited to 'tools') diff --git a/tools/run-centos b/tools/run-centos index d58ef3e8..6ac6c11f 100755 --- a/tools/run-centos +++ b/tools/run-centos @@ -23,6 +23,9 @@ Usage: ${0##*/} [ options ] version options: -a | --artifact keep .rpm artifacts + --dirty apply local changes before running tests. + If not provided, a clean checkout of branch is tested. + Inside container, changes are in local-changes.diff. -k | --keep keep container after tests -r | --rpm build .rpm -s | --srpm build .src.rpm @@ -80,25 +83,84 @@ inside() { inject_cloud_init(){ # take current cloud-init git dir and put it inside $name at # ~$user/cloud-init. - local name="$1" user="$2" top_d="" dname="" pstat="" - top_d=$(git rev-parse --show-toplevel) || { - errorrc "Failed to get git top level in $PWD"; + local name="$1" user="$2" dirty="$3" + local changes="" top_d="" dname="cloud-init" pstat="" + local gitdir="" commitish="" + gitdir=$(git rev-parse --git-dir) || { + errorrc "Failed to get git dir in $PWD"; return } - dname=$(basename "${top_d}") || return - debug 1 "collecting ${top_d} ($dname) into user $user in $name." - tar -C "${top_d}/.." -cpf - "$dname" | + local t=${gitdir%/*} + case "$t" in + */worktrees) + if [ -f "${t%worktrees}/config" ]; then + gitdir="${t%worktrees}" + fi + esac + + # attempt to get branch name. + commitish=$(git rev-parse --abbrev-ref HEAD) || { + errorrc "Failed git rev-parse --abbrev-ref HEAD" + return + } + if [ "$commitish" = "HEAD" ]; then + # detached head + commitish=$(git rev-parse HEAD) || { + errorrc "failed git rev-parse HEAD" + return + } + fi + + local local_changes=false + if ! git diff --quiet "$commitish"; then + # there are local changes not committed. + local_changes=true + if [ "$dirty" = "false" ]; then + error "WARNING: You had uncommitted changes. Those changes will " + error "be put into 'local-changes.diff' inside the container. " + error "To test these changes you must pass --dirty." + fi + fi + + debug 1 "collecting ${gitdir} ($dname) into user $user in $name." + tar -C "${gitdir}" -cpf - . | inside_as "$name" "$user" sh -ec ' dname=$1 + commitish=$2 rm -Rf "$dname" + mkdir -p $dname/.git + cd $dname/.git tar -xpf - - [ "$dname" = "cloud-init" ] || mv "$dname" cloud-init' \ - extract "$dname" + cd .. + git config core.bare false + out=$(git checkout $commitish 2>&1) || + { echo "failed git checkout $commitish: $out" 1>&2; exit 1; } + out=$(git checkout . 
2>&1) || + { echo "failed git checkout .: $out" 1>&2; exit 1; } + ' extract "$dname" "$commitish" [ "${PIPESTATUS[*]}" = "0 0" ] || { - error "Failed to push tarball of '$top_d' into $name" \ + error "Failed to push tarball of '$gitdir' into $name" \ " for user $user (dname=$dname)" return 1 } + + echo "local_changes=$local_changes dirty=$dirty" + if [ "$local_changes" = "true" ]; then + git diff "$commitish" | + inside_as "$name" "$user" sh -exc ' + cd "$1" + if [ "$2" = "true" ]; then + git apply + else + cat > local-changes.diff + fi + ' insert_changes "$dname" "$dirty" + [ "${PIPESTATUS[*]}" = "0 0" ] || { + error "Failed to apply local changes." + return 1 + } + fi + return 0 } @@ -179,7 +241,7 @@ delete_container() { main() { local short_opts="ahkrsuv" - local long_opts="artifact,help,keep,rpm,srpm,unittest,verbose" + local long_opts="artifact,dirty,help,keep,rpm,srpm,unittest,verbose" local getopt_out="" getopt_out=$(getopt --name "${0##*/}" \ --options "${short_opts}" --long "${long_opts}" -- "$@") && @@ -188,11 +250,13 @@ main() { local cur="" next="" local artifact="" keep="" rpm="" srpm="" unittest="" version="" + local dirty=false while [ $# -ne 0 ]; do cur="${1:-}"; next="${2:-}"; case "$cur" in -a|--artifact) artifact=1;; + --dirty) dirty=true;; -h|--help) Usage ; exit 0;; -k|--keep) KEEP=true;; -r|--rpm) rpm=1;; @@ -231,7 +295,7 @@ main() { inside "$name" useradd "$user" debug 1 "inserting cloud-init" - inject_cloud_init "$name" "$user" || { + inject_cloud_init "$name" "$user" "$dirty" || { errorrc "FAIL: injecting cloud-init into $name failed." return } @@ -244,7 +308,7 @@ main() { local errors=0 inside_as_cd "$name" "$user" "$cdir" \ - sh -ec "git checkout .; git status" || + sh -ec "git status" || { errorrc "git checkout failed."; errors=$(($errors+1)); } if [ -n "$unittest" ]; then -- cgit v1.2.3 From 754f54037aca0f604b8b57ab71b30dad5e5066cf Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Feb 2018 13:54:50 -0700 Subject: tests: run nosetests in cloudinit/ directory, fix py26 fallout. When we moved some tests to live under cloudinit/ we inadvertantly failed to change all things that would run nose to include that directory. This changes all the 'nose' invocations to consistently run with tests/unittests and cloudinit/. 
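For illustration, the consistent invocation is simply nose pointed at both trees, e.g.:

    nosetests tests/unittests cloudinit

The tox.ini and tools/run-centos hunks below make that same change in each environment.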
Also, it works around, more correctly this time, a python2.6-ism with the following code: with assertRaises(SystemExit) as cm: sys.exit(2) --- cloudinit/cmd/tests/test_clean.py | 3 ++- cloudinit/cmd/tests/test_status.py | 3 ++- cloudinit/tests/helpers.py | 20 +++++++++----------- tests/unittests/test_handler/test_schema.py | 12 +++++++----- tools/run-centos | 3 ++- tox.ini | 6 +++--- 6 files changed, 25 insertions(+), 22 deletions(-) (limited to 'tools') diff --git a/cloudinit/cmd/tests/test_clean.py b/cloudinit/cmd/tests/test_clean.py index 6713af4f..5a3ec3bf 100644 --- a/cloudinit/cmd/tests/test_clean.py +++ b/cloudinit/cmd/tests/test_clean.py @@ -165,10 +165,11 @@ class TestClean(CiTestCase): wrap_and_call( 'cloudinit.cmd.clean', {'Init': {'side_effect': self.init_class}, + 'sys.exit': {'side_effect': self.sys_exit}, 'sys.argv': {'new': ['clean', '--logs']}}, clean.main) - self.assertRaisesCodeEqual(0, context_manager.exception.code) + self.assertEqual(0, context_manager.exception.code) self.assertFalse( os.path.exists(self.log1), 'Unexpected log {0}'.format(self.log1)) diff --git a/cloudinit/cmd/tests/test_status.py b/cloudinit/cmd/tests/test_status.py index 4a5a8c06..37a89936 100644 --- a/cloudinit/cmd/tests/test_status.py +++ b/cloudinit/cmd/tests/test_status.py @@ -380,10 +380,11 @@ class TestStatus(CiTestCase): wrap_and_call( 'cloudinit.cmd.status', {'sys.argv': {'new': ['status']}, + 'sys.exit': {'side_effect': self.sys_exit}, '_is_cloudinit_disabled': (False, ''), 'Init': {'side_effect': self.init_class}}, status.main) - self.assertRaisesCodeEqual(0, context_manager.exception.code) + self.assertEqual(0, context_manager.exception.code) self.assertEqual('status: running\n', m_stdout.getvalue()) # vi: ts=4 expandtab syntax=python diff --git a/cloudinit/tests/helpers.py b/cloudinit/tests/helpers.py index 0080c729..41d9a8ee 100644 --- a/cloudinit/tests/helpers.py +++ b/cloudinit/tests/helpers.py @@ -173,17 +173,15 @@ class CiTestCase(TestCase): dir = self.tmp_dir() return os.path.normpath(os.path.abspath(os.path.join(dir, path))) - def assertRaisesCodeEqual(self, expected, found): - """Handle centos6 having different context manager for assertRaises. - with assertRaises(Exception) as e: - raise Exception("BOO") - - centos6 will have e.exception as an integer. - anything nwere will have it as something with a '.code'""" - if isinstance(found, int): - self.assertEqual(expected, found) - else: - self.assertEqual(expected, found.code) + def sys_exit(self, code): + """Provide a wrapper around sys.exit for python 2.6 + + In 2.6, this code would produce 'cm.exception' with value int(2) + rather than the SystemExit that was raised by sys.exit(2). 
+ with assertRaises(SystemExit) as cm: + sys.exit(2) + """ + raise SystemExit(code) class ResourceUsingTestCase(CiTestCase): diff --git a/tests/unittests/test_handler/test_schema.py b/tests/unittests/test_handler/test_schema.py index 648573f6..df67a0e0 100644 --- a/tests/unittests/test_handler/test_schema.py +++ b/tests/unittests/test_handler/test_schema.py @@ -336,11 +336,13 @@ class MainTest(CiTestCase): def test_main_missing_args(self): """Main exits non-zero and reports an error on missing parameters.""" - with mock.patch('sys.argv', ['mycmd']): - with mock.patch('sys.stderr', new_callable=StringIO) as m_stderr: - with self.assertRaises(SystemExit) as context_manager: - main() - self.assertEqual('1', str(context_manager.exception)) + with mock.patch('sys.exit', side_effect=self.sys_exit): + with mock.patch('sys.argv', ['mycmd']): + with mock.patch('sys.stderr', new_callable=StringIO) as \ + m_stderr: + with self.assertRaises(SystemExit) as context_manager: + main() + self.assertEqual(1, context_manager.exception.code) self.assertEqual( 'Expected either --config-file argument or --doc\n', m_stderr.getvalue()) diff --git a/tools/run-centos b/tools/run-centos index 6ac6c11f..cb241ee5 100755 --- a/tools/run-centos +++ b/tools/run-centos @@ -313,7 +313,8 @@ main() { if [ -n "$unittest" ]; then debug 1 "running unit tests." - inside_as_cd "$name" "$user" "$cdir" nosetests tests/unittests || + inside_as_cd "$name" "$user" "$cdir" \ + nosetests tests/unittests cloudinit || { errorrc "nosetests failed."; errors=$(($errors+1)); } fi diff --git a/tox.ini b/tox.ini index bb74853f..1f990af4 100644 --- a/tox.ini +++ b/tox.ini @@ -45,7 +45,7 @@ deps = -r{toxinidir}/test-requirements.txt [testenv:py26] deps = -r{toxinidir}/test-requirements.txt -commands = nosetests {posargs:tests/unittests} +commands = nosetests {posargs:tests/unittests cloudinit} setenv = LC_ALL = C @@ -83,7 +83,7 @@ deps = [testenv:centos6] basepython = python2.6 -commands = nosetests {posargs:tests/unittests} +commands = nosetests {posargs:tests/unittests cloudinit} deps = # requirements argparse==1.2.1 @@ -98,7 +98,7 @@ deps = [testenv:opensusel42] basepython = python2.7 -commands = nosetests {posargs:tests/unittests} +commands = nosetests {posargs:tests/unittests cloudinit} deps = # requirements argparse==1.3.0 -- cgit v1.2.3 From 42923ffbbf6a84819224417f0e032497af60d0cd Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 12 Feb 2018 15:10:25 -0700 Subject: ds-identify: check /writable/system-data/ for nocloud seed. Ubuntu core seeds information to nocloud via a bind-mount of /writable/system-data/var/lib/cloud over /var/lib/cloud. When ds-identify runs as a systemd generator that mount is not guaranteed to have been done. It is guaranteed at cloud-init-local.service time, but not generator time. Images built with 'ubuntu-image --cloud-init=user-data-file' would have cloud-init disabled. The fix here is just to consider the seed dir under /writable/system-data. 
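For example (with an unset PATH_ROOT, as in the new test case below), the NoCloud check now also looks for seed files such as:

    /writable/system-data/var/lib/cloud/seed/nocloud-net/meta-data
    /writable/system-data/var/lib/cloud/seed/nocloud-net/user-data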
LP: #1747070 --- tests/unittests/test_ds_identify.py | 24 ++++++++++++++++++++++++ tools/ds-identify | 11 +++++++++++ 2 files changed, 35 insertions(+) (limited to 'tools') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 31cc6223..03942de0 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -359,6 +359,14 @@ class TestDsIdentify(CiTestCase): """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" self._test_ds_found('NoCloud') + def test_nocloud_seed(self): + """Nocloud seed directory.""" + self._test_ds_found('NoCloud-seed') + + def test_nocloud_seed_ubuntu_core_writable(self): + """Nocloud seed directory ubuntu core writable""" + self._test_ds_found('NoCloud-seed-ubuntu-core') + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" @@ -454,6 +462,22 @@ VALID_CFG = { 'dev/vdb': 'pretend iso content for cidata\n', } }, + 'NoCloud-seed': { + 'ds': 'NoCloud', + 'files': { + os.path.join(P_SEED_DIR, 'nocloud', 'user-data'): 'ud\n', + os.path.join(P_SEED_DIR, 'nocloud', 'meta-data'): 'md\n', + } + }, + 'NoCloud-seed-ubuntu-core': { + 'ds': 'NoCloud', + 'files': { + os.path.join('writable/system-data', P_SEED_DIR, + 'nocloud-net', 'user-data'): 'ud\n', + os.path.join('writable/system-data', P_SEED_DIR, + 'nocloud-net', 'meta-data'): 'md\n', + } + }, 'OpenStack': { 'ds': 'OpenStack', 'files': {P_PRODUCT_NAME: 'OpenStack Nova\n'}, diff --git a/tools/ds-identify b/tools/ds-identify index cd268242..5f762438 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -470,6 +470,16 @@ check_seed_dir() { return 0 } +check_writable_seed_dir() { + # ubuntu core bind-mounts /writable/system-data/var/lib/cloud + # over the top of /var/lib/cloud, but the mount might not be done yet. + local wdir="/writable/system-data" + [ -d "${PATH_ROOT}$wdir" ] || return 1 + local sdir="${PATH_ROOT}$wdir${PATH_VAR_LIB_CLOUD#${PATH_ROOT}}" + local PATH_VAR_LIB_CLOUD="$sdir" + check_seed_dir "$@" +} + probe_floppy() { cached "${STATE_FLOPPY_PROBED}" && return "${STATE_FLOPPY_PROBED}" local fpath=/dev/floppy @@ -569,6 +579,7 @@ dscheck_NoCloud() { esac for d in nocloud nocloud-net; do check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} + check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done if has_fs_with_label "${fslabel}"; then return ${DS_FOUND} -- cgit v1.2.3 From b7497e807fa12a26d4a12aaf1ee9302a4fd24728 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 21 Feb 2018 16:09:14 -0500 Subject: ds-identify: Fix searching for iso9660 OVF cdroms. This fixes a bug in parsing of 'blkid -o export' output. The result of the bug meant that DI_ISO9660_DEVS did not get set correctly and is_cdrom_ovf would not identify devices in most cases. The tests are improved to demonstrate both multiple iso devices and also a cdrom that doesn't sort "last" in blkid output. The code change is to use DEVNAME as the record separator when parsing blkid -o export rather than relying on being able to read the empty line. 
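A minimal sketch of that strategy (illustrative shell only; the variable names mirror read_fs_info in the diff below): every DEVNAME= line begins a new record, and the record in progress is flushed both there and once after the loop.

    for line in "$@"; do
        case "$line" in
            DEVNAME=*)
                # flush the previous record before starting a new one
                [ -n "$dev" -a "$ftype" = "iso9660" ] && isodevs="${isodevs} ${dev}=$label"
                dev=${line#DEVNAME=}; label=""; ftype="";;
            LABEL=*) label=${line#LABEL=};;
            TYPE=*)  ftype=${line#TYPE=};;
        esac
    done
    # final flush; no trailing empty line is required
    [ -n "$dev" -a "$ftype" = "iso9660" ] && isodevs="${isodevs} ${dev}=$label"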
LP: #1749980 --- tests/unittests/test_ds_identify.py | 9 ++++++--- tools/ds-identify | 39 ++++++++++++++++++++----------------- 2 files changed, 27 insertions(+), 21 deletions(-) (limited to 'tools') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 03942de0..21258347 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -350,8 +350,10 @@ class TestDsIdentify(CiTestCase): "OVFENV", "ovfenv"] for valid_ovf_label in valid_ovf_labels: ovf_cdrom_by_label['mocks'][0]['out'] = blkid_out([ + {'DEVNAME': 'sda1', 'TYPE': 'ext4', 'LABEL': 'rootfs'}, {'DEVNAME': 'sr0', 'TYPE': 'iso9660', - 'LABEL': valid_ovf_label}]) + 'LABEL': valid_ovf_label}, + {'DEVNAME': 'vda1', 'TYPE': 'ntfs', 'LABEL': 'data'}]) self._check_via_dict( ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE]) @@ -513,8 +515,9 @@ VALID_CFG = { 'mocks': [ {'name': 'blkid', 'ret': 0, 'out': blkid_out( - [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, - {'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''}]) + [{'DEVNAME': 'sr0', 'TYPE': 'iso9660', 'LABEL': ''}, + {'DEVNAME': 'sr1', 'TYPE': 'iso9660', 'LABEL': 'ignoreme'}, + {'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}]), }, MOCK_VIRT_IS_VMWARE, ], diff --git a/tools/ds-identify b/tools/ds-identify index 5f762438..5da51bcc 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -186,7 +186,8 @@ block_dev_with_label() { read_fs_info() { cached "${DI_BLKID_OUTPUT}" && return 0 # do not rely on links in /dev/disk which might not be present yet. - # note that older blkid versions do not report DEVNAME in 'export' output. + # Note that blkid < 2.22 (centos6, trusty) do not output DEVNAME. + # that means that DI_ISO9660_DEVS will not be set. if is_container; then # blkid will in a container, or at least currently in lxd # not provide useful information. @@ -203,21 +204,26 @@ read_fs_info() { DI_ISO9660_DEVS="$UNAVAILABLE:error" return $ret } - IFS="$CR" - set -- $out - IFS="$oifs" - for line in "$@" ""; do + # 'set --' will collapse multiple consecutive entries in IFS for + # whitespace characters (\n, tab, " ") so we cannot rely on getting + # empty lines in "$@" below. + IFS="$CR"; set -- $out; IFS="$oifs" + + for line in "$@"; do case "${line}" in - DEVNAME=*) dev=${line#DEVNAME=};; + DEVNAME=*) + [ -n "$dev" -a "$ftype" = "iso9660" ] && + isodevs="${isodevs} ${dev}=$label" + ftype=""; dev=""; label=""; + dev=${line#DEVNAME=};; LABEL=*) label="${line#LABEL=}"; labels="${labels}${line#LABEL=}${delim}";; TYPE=*) ftype=${line#TYPE=};; - "") if [ "$ftype" = "iso9660" ]; then - isodevs="${isodevs} ${dev}=$label" - fi - ftype=""; devname=""; label=""; esac done + [ -n "$dev" -a "$ftype" = "iso9660" ] && + isodevs="${isodevs} ${dev}=$label" + DI_FS_LABELS="${labels%${delim}}" DI_ISO9660_DEVS="${isodevs# }" } @@ -696,15 +702,12 @@ dscheck_OVF() { # Azure provides ovf. Skip false positive by dis-allowing. 
is_azure_chassis && return $DS_NOT_FOUND - local isodevs="${DI_ISO9660_DEVS}" - case "$isodevs" in - ""|$UNAVAILABLE:*) return ${DS_NOT_FOUND};; - esac - # DI_ISO9660_DEVS is =label, like /dev/sr0=OVF-TRANSPORT - for tok in $isodevs; do - is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return $DS_FOUND - done + if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then + for tok in ${DI_ISO9660_DEVS}; do + is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return $DS_FOUND + done + fi if ovf_vmware_guest_customization; then return ${DS_FOUND} -- cgit v1.2.3 From 3814d559c3e973238d819721605c7451e852fe63 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Tue, 13 Feb 2018 13:00:53 -0800 Subject: OVF: Fix VMware support for 64-bit platforms. On few 64-bit platforms, the open-vm-tools package is installed at /usr/lib64/. The DataSourceOVF is changed to search look there for the 'customization plugin' --- cloudinit/sources/DataSourceOVF.py | 21 ++++++++++++++++----- tests/unittests/test_ds_identify.py | 10 ++++++++++ tools/ds-identify | 3 ++- 3 files changed, 28 insertions(+), 6 deletions(-) (limited to 'tools') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 6e62f984..dc914a72 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -95,11 +95,20 @@ class DataSourceOVF(sources.DataSource): "VMware Customization support") elif not util.get_cfg_option_bool( self.sys_cfg, "disable_vmware_customization", True): - deployPkgPluginPath = search_file("/usr/lib/vmware-tools", - "libdeployPkgPlugin.so") - if not deployPkgPluginPath: - deployPkgPluginPath = search_file("/usr/lib/open-vm-tools", - "libdeployPkgPlugin.so") + + search_paths = ( + "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools", + "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools") + + plugin = "libdeployPkgPlugin.so" + deployPkgPluginPath = None + for path in search_paths: + deployPkgPluginPath = search_file(path, plugin) + if deployPkgPluginPath: + LOG.debug("Found the customization plugin at %s", + deployPkgPluginPath) + break + if deployPkgPluginPath: # When the VM is powered on, the "VMware Tools" daemon # copies the customization specification file to @@ -111,6 +120,8 @@ class DataSourceOVF(sources.DataSource): msg="waiting for configuration file", func=wait_for_imc_cfg_file, args=("cust.cfg", max_wait)) + else: + LOG.debug("Did not find the customization plugin.") if vmwareImcConfigFilePath: LOG.debug("Found VMware Customization Config File at %s", diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 21258347..9be3f964 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -337,6 +337,16 @@ class TestDsIdentify(CiTestCase): """OVF is identified when vmware customization is enabled.""" self._test_ds_found('OVF-vmware-customization') + def test_ovf_on_vmware_iso_found_open_vm_tools_64(self): + """OVF is identified when open-vm-tools installed in /usr/lib64.""" + cust64 = copy.deepcopy(VALID_CFG['OVF-vmware-customization']) + p32 = 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so' + open64 = 'usr/lib64/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so' + cust64['files'][open64] = cust64['files'][p32] + del cust64['files'][p32] + return self._check_via_dict( + cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE]) + def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self): """OVF is identified by well-known iso9660 labels.""" ovf_cdrom_by_label = 
copy.deepcopy(VALID_CFG['OVF']) diff --git a/tools/ds-identify b/tools/ds-identify index 5da51bcc..ec368d58 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -650,8 +650,9 @@ ovf_vmware_guest_customization() { # we have to have the plugin to do vmware customization local found="" pkg="" pre="${PATH_ROOT}/usr/lib" + local ppath="plugins/vmsvc/libdeployPkgPlugin.so" for pkg in vmware-tools open-vm-tools; do - if [ -f "$pre/$pkg/plugins/vmsvc/libdeployPkgPlugin.so" ]; then + if [ -f "$pre/$pkg/$ppath" -o -f "${pre}64/$pkg/$ppath" ]; then found="$pkg"; break; fi done -- cgit v1.2.3 From e88e35483e373b39b4485f30f7a867f50571027c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 19 Mar 2018 14:50:54 -0400 Subject: Add Hetzner Cloud DataSource The Hetzner Cloud metadata service is an AWS-style service available over HTTP via the link local address 169.254.169.254. https://hetzner.com/cloud https://docs.hetzner.cloud/ --- cloudinit/apport.py | 6 +- cloudinit/settings.py | 1 + cloudinit/sources/DataSourceHetzner.py | 100 ++++++++++++++++++++++++ cloudinit/sources/helpers/hetzner.py | 26 ++++++ tests/unittests/test_datasource/test_common.py | 2 + tests/unittests/test_datasource/test_hetzner.py | 99 +++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 9 +++ tools/ds-identify | 7 +- 8 files changed, 246 insertions(+), 4 deletions(-) create mode 100644 cloudinit/sources/DataSourceHetzner.py create mode 100644 cloudinit/sources/helpers/hetzner.py create mode 100644 tests/unittests/test_datasource/test_hetzner.py (limited to 'tools') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 221f341c..618b0160 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -14,9 +14,9 @@ except ImportError: KNOWN_CLOUD_NAMES = [ 'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma', - 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', 'MAAS', - 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', 'Scaleway', 'SmartOS', - 'VMware', 'Other'] + 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', + 'Hetzner Cloud', 'MAAS', 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', + 'Scaleway', 'SmartOS', 'VMware', 'Other'] # Potentially clear text collected logs CLOUDINIT_LOG = '/var/log/cloud-init.log' diff --git a/cloudinit/settings.py b/cloudinit/settings.py index c120498f..5fe749d4 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -36,6 +36,7 @@ CFG_BUILTIN = { 'SmartOS', 'Bigstep', 'Scaleway', + 'Hetzner', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py new file mode 100644 index 00000000..769fe131 --- /dev/null +++ b/cloudinit/sources/DataSourceHetzner.py @@ -0,0 +1,100 @@ +# Author: Jonas Keidel +# Author: Markus Schade +# +# This file is part of cloud-init. See LICENSE file for license information. +# +"""Hetzner Cloud API Documentation. 
+ https://docs.hetzner.cloud/""" + +from cloudinit import log as logging +from cloudinit import net as cloudnet +from cloudinit import sources +from cloudinit import util + +import cloudinit.sources.helpers.hetzner as hc_helper + +LOG = logging.getLogger(__name__) + +BASE_URL_V1 = 'http://169.254.169.254/hetzner/v1' + +BUILTIN_DS_CONFIG = { + 'metadata_url': BASE_URL_V1 + '/metadata', + 'userdata_url': BASE_URL_V1 + '/userdata', +} + +MD_RETRIES = 60 +MD_TIMEOUT = 2 +MD_WAIT_RETRY = 2 + + +class DataSourceHetzner(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.distro = distro + self.metadata = dict() + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}), + BUILTIN_DS_CONFIG]) + self.metadata_address = self.ds_cfg['metadata_url'] + self.userdata_address = self.ds_cfg['userdata_url'] + self.retries = self.ds_cfg.get('retries', MD_RETRIES) + self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT) + self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY) + self._network_config = None + self.dsmode = sources.DSMODE_NETWORK + + def get_data(self): + nic = cloudnet.find_fallback_nic() + with cloudnet.EphemeralIPv4Network(nic, "169.254.0.1", 16, + "169.254.255.255"): + md = hc_helper.read_metadata( + self.metadata_address, timeout=self.timeout, + sec_between=self.wait_retry, retries=self.retries) + ud = hc_helper.read_userdata( + self.userdata_address, timeout=self.timeout, + sec_between=self.wait_retry, retries=self.retries) + + self.userdata_raw = ud + self.metadata_full = md + + """hostname is name provided by user at launch. The API enforces + it is a valid hostname, but it is not guaranteed to be resolvable + in dns or fully qualified.""" + self.metadata['instance-id'] = md['instance-id'] + self.metadata['local-hostname'] = md['hostname'] + self.metadata['network-config'] = md.get('network-config', None) + self.metadata['public-keys'] = md.get('public-keys', None) + self.vendordata_raw = md.get("vendor_data", None) + + return True + + @property + def network_config(self): + """Configure the networking. This needs to be done each boot, since + the IP information may have changed due to snapshot and/or + migration. + """ + + if self._network_config: + return self._network_config + + _net_config = self.metadata['network-config'] + if not _net_config: + raise Exception("Unable to get meta-data from server....") + + self._network_config = _net_config + + return self._network_config + + +# Used to match classes to dependencies +datasources = [ + (DataSourceHetzner, (sources.DEP_FILESYSTEM, )), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py new file mode 100644 index 00000000..2554530d --- /dev/null +++ b/cloudinit/sources/helpers/hetzner.py @@ -0,0 +1,26 @@ +# Author: Jonas Keidel +# Author: Markus Schade +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit import log as logging +from cloudinit import url_helper +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +def read_metadata(url, timeout=2, sec_between=2, retries=30): + response = url_helper.readurl(url, timeout=timeout, + sec_between=sec_between, retries=retries) + if not response.ok(): + raise RuntimeError("unable to read metadata at %s" % url) + return util.load_yaml(response.contents.decode()) + + +def read_userdata(url, timeout=2, sec_between=2, retries=30): + response = url_helper.readurl(url, timeout=timeout, + sec_between=sec_between, retries=retries) + if not response.ok(): + raise RuntimeError("unable to read userdata at %s" % url) + return response.contents diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 80b9c650..6d2dc5b5 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -14,6 +14,7 @@ from cloudinit.sources import ( DataSourceDigitalOcean as DigitalOcean, DataSourceEc2 as Ec2, DataSourceGCE as GCE, + DataSourceHetzner as Hetzner, DataSourceMAAS as MAAS, DataSourceNoCloud as NoCloud, DataSourceOpenNebula as OpenNebula, @@ -31,6 +32,7 @@ DEFAULT_LOCAL = [ CloudSigma.DataSourceCloudSigma, ConfigDrive.DataSourceConfigDrive, DigitalOcean.DataSourceDigitalOcean, + Hetzner.DataSourceHetzner, NoCloud.DataSourceNoCloud, OpenNebula.DataSourceOpenNebula, OVF.DataSourceOVF, diff --git a/tests/unittests/test_datasource/test_hetzner.py b/tests/unittests/test_datasource/test_hetzner.py new file mode 100644 index 00000000..f1d1525e --- /dev/null +++ b/tests/unittests/test_datasource/test_hetzner.py @@ -0,0 +1,99 @@ +# Copyright (C) 2018 Jonas Keidel +# +# Author: Jonas Keidel +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.sources import DataSourceHetzner +from cloudinit import util, settings, helpers + +from cloudinit.tests.helpers import mock, CiTestCase + +METADATA = util.load_yaml(""" +hostname: cloudinit-test +instance-id: 123456 +local-ipv4: '' +network-config: + config: + - mac_address: 96:00:00:08:19:da + name: eth0 + subnets: + - dns_nameservers: + - 213.133.99.99 + - 213.133.100.100 + - 213.133.98.98 + ipv4: true + type: dhcp + type: physical + - name: eth0:0 + subnets: + - address: 2a01:4f8:beef:beef::1/64 + gateway: fe80::1 + ipv6: true + routes: + - gateway: fe80::1%eth0 + netmask: 0 + network: '::' + type: static + type: physical + version: 1 +network-sysconfig: "DEVICE='eth0'\nTYPE=Ethernet\nBOOTPROTO=dhcp\n\ + ONBOOT='yes'\nHWADDR=96:00:00:08:19:da\n\ + IPV6INIT=yes\nIPV6ADDR=2a01:4f8:beef:beef::1/64\n\ + IPV6_DEFAULTGW=fe80::1%eth0\nIPV6_AUTOCONF=no\n\ + DNS1=213.133.99.99\nDNS2=213.133.100.100\n" +public-ipv4: 192.168.0.1 +public-keys: +- ssh-ed25519 \ + AAAAC3Nzac1lZdI1NTE5AaaAIaFrcac0yVITsmRrmueq6MD0qYNKlEvW8O1Ib4nkhmWh \ + test-key@workstation +vendor_data: "test" +""") + +USERDATA = b"""#cloud-config +runcmd: +- [touch, /root/cloud-init-worked ] +""" + + +class TestDataSourceHetzner(CiTestCase): + """ + Test reading the meta-data + """ + def setUp(self): + super(TestDataSourceHetzner, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self): + ds = DataSourceHetzner.DataSourceHetzner( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + return ds + + @mock.patch('cloudinit.net.EphemeralIPv4Network') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') + @mock.patch('cloudinit.sources.helpers.hetzner.read_userdata') + def test_read_data(self, m_usermd, m_readmd, m_fallback_nic, m_net): + m_readmd.return_value = METADATA.copy() + m_usermd.return_value = USERDATA + m_fallback_nic.return_value = 'eth0' + + ds = self.get_ds() + ret = ds.get_data() + self.assertTrue(ret) + + m_net.assert_called_once_with( + 'eth0', '169.254.0.1', + 16, '169.254.255.255' + ) + + self.assertTrue(m_readmd.called) + + self.assertEqual(METADATA.get('hostname'), ds.get_hostname()) + + self.assertEqual(METADATA.get('public-keys'), + ds.get_public_ssh_keys()) + + self.assertIsInstance(ds.get_public_ssh_keys(), list) + self.assertEqual(ds.get_userdata_raw(), USERDATA) + self.assertEqual(ds.get_vendordata_raw(), METADATA.get('vendor_data')) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 9be3f964..9c5628e7 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -60,6 +60,7 @@ P_CHASSIS_ASSET_TAG = "sys/class/dmi/id/chassis_asset_tag" P_PRODUCT_NAME = "sys/class/dmi/id/product_name" P_PRODUCT_SERIAL = "sys/class/dmi/id/product_serial" P_PRODUCT_UUID = "sys/class/dmi/id/product_uuid" +P_SYS_VENDOR = "sys/class/dmi/id/sys_vendor" P_SEED_DIR = "var/lib/cloud/seed" P_DSID_CFG = "etc/cloud/ds-identify.cfg" @@ -379,6 +380,10 @@ class TestDsIdentify(CiTestCase): """Nocloud seed directory ubuntu core writable""" self._test_ds_found('NoCloud-seed-ubuntu-core') + def test_hetzner_found(self): + """Hetzner cloud is identified in sys_vendor.""" + self._test_ds_found('Hetzner') + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" @@ -559,6 +564,10 @@ VALID_CFG = { }, ], }, + 'Hetzner': { + 'ds': 'Hetzner', + 'files': {P_SYS_VENDOR: 'Hetzner\n'}, + }, } # vi: ts=4 expandtab diff --git a/tools/ds-identify 
b/tools/ds-identify index ec368d58..e3f93c90 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -114,7 +114,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway" +OVF SmartOS Scaleway Hetzner" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -979,6 +979,11 @@ dscheck_Scaleway() { return ${DS_NOT_FOUND} } +dscheck_Hetzner() { + dmi_sys_vendor_is Hetzner && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + collect_info() { read_virt read_pid1_product_name -- cgit v1.2.3 From 7713713265224d9f34410028de6bd50d1451aed9 Mon Sep 17 00:00:00 2001 From: Kurt Garloff Date: Wed, 21 Mar 2018 20:07:29 -0600 Subject: Identify OpenTelekomCloud Xen as OpenStack DS. Open Telekom Cloud gen1 (Xen) hosts do not provide nova product names in DMI but Xen HVM domU. They can however be safely identified by the OpenTelekomCloud Chassis asset tag. OpenTelekomCloud does use the network OpenStack DataSource, so we better detect it. LP: #1756471 --- tests/unittests/test_ds_identify.py | 11 +++++++++++ tools/ds-identify | 4 ++++ 2 files changed, 15 insertions(+) (limited to 'tools') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 9c5628e7..85999b7a 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -66,6 +66,7 @@ P_DSID_CFG = "etc/cloud/ds-identify.cfg" MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0} +MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0} MOCK_UNAME_IS_PPC64 = {'name': 'uname', 'out': UNAME_PPC64EL, 'ret': 0} @@ -291,6 +292,10 @@ class TestDsIdentify(CiTestCase): """On Intel, openstack must be identified.""" self._test_ds_found('OpenStack') + def test_openstack_open_telekom_cloud(self): + """Open Telecom identification.""" + self._test_ds_found('OpenStack-OpenTelekom') + def test_openstack_on_non_intel_is_maybe(self): """On non-Intel, openstack without dmi info is maybe. @@ -502,6 +507,12 @@ VALID_CFG = { 'policy_dmi': POLICY_FOUND_ONLY, 'policy_no_dmi': POLICY_FOUND_ONLY, }, + 'OpenStack-OpenTelekom': { + # OTC gen1 (Xen) hosts use OpenStack datasource, LP: #1756471 + 'ds': 'OpenStack', + 'files': {P_CHASSIS_ASSET_TAG: 'OpenTelekomCloud\n'}, + 'mocks': [MOCK_VIRT_IS_XEN], + }, 'OVF-seed': { 'ds': 'OVF', 'files': { diff --git a/tools/ds-identify b/tools/ds-identify index e3f93c90..e2552c8b 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -894,6 +894,10 @@ dscheck_OpenStack() { return ${DS_FOUND} fi + if dmi_chassis_asset_tag_matches "OpenTelekomCloud"; then + return ${DS_FOUND} + fi + # LP: #1715241 : arch other than intel are not identified properly. case "$DI_UNAME_MACHINE" in i?86|x86_64) :;; -- cgit v1.2.3 From 68d798bb793052f589a7e48c508aca9031c7e271 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 23 Mar 2018 13:51:17 -0600 Subject: tests: remove jsonschema from xenial tox environment. Ubuntu 16.04 (xenial) does not have jsonschema installed by default. As it is listed in requirements, the tox environment will always have it installed. Add the helper tools/pipremove that removes pip packages. Then use that to remove jsonschema without noise of always running and ignoring a 'pip uninstall jsonschema'. 
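For example, the xenial tox environment (changed below) then runs:

    python ./tools/pipremove jsonschema
    python -m nose {posargs:tests/unittests cloudinit}

so the uninstall step stays quiet when jsonschema is not installed.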
--- tools/pipremove | 14 ++++++++++++++ tox.ini | 3 +++ 2 files changed, 17 insertions(+) create mode 100755 tools/pipremove (limited to 'tools') diff --git a/tools/pipremove b/tools/pipremove new file mode 100755 index 00000000..f8f4ff11 --- /dev/null +++ b/tools/pipremove @@ -0,0 +1,14 @@ +#!/usr/bin/python3 +import subprocess +import sys + +for pkg in sys.argv[1:]: + try: + exec('import %s' % pkg) # pylint: disable=W0122 + except ImportError: + continue + sys.stderr.write("%s removing package %s\n" % (sys.argv[0], pkg)) + ret = subprocess.Popen(['pip', 'uninstall', '--yes', pkg]).wait() + if ret != 0: + sys.stderr.write("Failed to uninstall %s (%d)\n" % (pkg, ret)) + sys.exit(ret) diff --git a/tox.ini b/tox.ini index 1f990af4..818ade3d 100644 --- a/tox.ini +++ b/tox.ini @@ -60,6 +60,9 @@ deps = sphinx commands = {envpython} -m sphinx {posargs:doc/rtd doc/rtd_html} [testenv:xenial] +commands = + python ./tools/pipremove jsonschema + python -m nose {posargs:tests/unittests cloudinit} basepython = python3 deps = # requirements -- cgit v1.2.3 From e0f644b7c8c76bd63d242558685722cc70d9c51d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 23 Mar 2018 17:17:55 -0600 Subject: IBMCloud: Initial IBM Cloud datasource. This adds a specific IBM Cloud datasource. IBM Cloud is identified by: a.) running on xen b.) one of a LABEL=METADATA disk or a LABEL=config-2 disk with UUID=9796-932E The datasource contains its own config-drive reader that reads only the currently supported portion of config-drive needed for ibm cloud. During the provisioning boot, cloud-init is disabled. See the docstring in DataSourceIBMCloud.py for more more information. --- cloudinit/sources/DataSourceConfigDrive.py | 10 + cloudinit/sources/DataSourceIBMCloud.py | 325 +++++++++++++++++++++++ cloudinit/tests/test_util.py | 72 +++++ cloudinit/util.py | 31 +++ tests/unittests/test_datasource/test_ibmcloud.py | 262 ++++++++++++++++++ tests/unittests/test_ds_identify.py | 104 +++++++- tools/ds-identify | 65 ++++- 7 files changed, 857 insertions(+), 12 deletions(-) create mode 100644 cloudinit/sources/DataSourceIBMCloud.py create mode 100644 tests/unittests/test_datasource/test_ibmcloud.py (limited to 'tools') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index b8db6267..c7b5fe5f 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -14,6 +14,7 @@ from cloudinit import util from cloudinit.net import eni +from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform from cloudinit.sources.helpers import openstack LOG = logging.getLogger(__name__) @@ -255,6 +256,15 @@ def find_candidate_devs(probe_optical=True): # an unpartitioned block device (ex sda, not sda1) devices = [d for d in candidates if d in by_label or not util.is_partition(d)] + + if devices: + # IBMCloud uses config-2 label, but limited to a single UUID. + ibm_platform, ibm_path = get_ibm_platform() + if ibm_path in devices: + devices.remove(ibm_path) + LOG.debug("IBMCloud device '%s' (%s) removed from candidate list", + ibm_path, ibm_platform) + return devices diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py new file mode 100644 index 00000000..02b3d56f --- /dev/null +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -0,0 +1,325 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""Datasource for IBMCloud. + +IBMCloud is also know as SoftLayer or BlueMix. 
+IBMCloud hypervisor is xen (2018-03-10). + +There are 2 different api exposed launch methods. + * template: This is the legacy method of launching instances. + When booting from an image template, the system boots first into + a "provisioning" mode. There, host <-> guest mechanisms are utilized + to execute code in the guest and provision it. + + Cloud-init will disable itself when it detects that it is in the + provisioning mode. It detects this by the presence of + a file '/root/provisioningConfiguration.cfg'. + + When provided with user-data, the "first boot" will contain a + ConfigDrive-like disk labeled with 'METADATA'. If there is no user-data + provided, then there is no data-source. + + Cloud-init never does any network configuration in this mode. + + * os_code: Essentially "launch by OS Code" (Operating System Code). + This is a more modern approach. There is no specific "provisioning" boot. + Instead, cloud-init does all the customization. With or without + user-data provided, an OpenStack ConfigDrive like disk is attached. + + Only disks with label 'config-2' and UUID '9796-932E' are considered. + This is to avoid this datasource claiming ConfigDrive. This does + mean that 1 in 8^16 (~4 billion) Xen ConfigDrive systems will be + incorrectly identified as IBMCloud. + +TODO: + * is uuid (/sys/hypervisor/uuid) stable for life of an instance? + it seems it is not the same as data's uuid in the os_code case + but is in the template case. + +""" +import base64 +import json +import os + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit.sources.helpers import openstack +from cloudinit import util + +LOG = logging.getLogger(__name__) + +IBM_CONFIG_UUID = "9796-932E" + + +class Platforms(object): + TEMPLATE_LIVE_METADATA = "Template/Live/Metadata" + TEMPLATE_LIVE_NODATA = "UNABLE TO BE IDENTIFIED." + TEMPLATE_PROVISIONING_METADATA = "Template/Provisioning/Metadata" + TEMPLATE_PROVISIONING_NODATA = "Template/Provisioning/No-Metadata" + OS_CODE = "OS-Code/Live" + + +PROVISIONING = ( + Platforms.TEMPLATE_PROVISIONING_METADATA, + Platforms.TEMPLATE_PROVISIONING_NODATA) + + +class DataSourceIBMCloud(sources.DataSource): + + dsname = 'IBMCloud' + system_uuid = None + + def __init__(self, sys_cfg, distro, paths): + super(DataSourceIBMCloud, self).__init__(sys_cfg, distro, paths) + self.source = None + self._network_config = None + self.network_json = None + self.platform = None + + def __str__(self): + root = super(DataSourceIBMCloud, self).__str__() + mstr = "%s [%s %s]" % (root, self.platform, self.source) + return mstr + + def _get_data(self): + results = read_md() + if results is None: + return False + + self.source = results['source'] + self.platform = results['platform'] + self.metadata = results['metadata'] + self.userdata_raw = results.get('userdata') + self.network_json = results.get('networkdata') + vd = results.get('vendordata') + self.vendordata_pure = vd + self.system_uuid = results['system-uuid'] + try: + self.vendordata_raw = sources.convert_vendordata(vd) + except ValueError as e: + LOG.warning("Invalid content in vendor-data: %s", e) + self.vendordata_raw = None + + return True + + def check_instance_id(self, sys_cfg): + """quickly (local check only) if self.instance_id is still valid + + in Template mode, the system uuid (/sys/hypervisor/uuid) is the + same as found in the METADATA disk. But that is not true in OS_CODE + mode. 
So we read the system_uuid and keep that for later compare.""" + if self.system_uuid is None: + return False + return self.system_uuid == _read_system_uuid() + + @property + def network_config(self): + if self.platform != Platforms.OS_CODE: + # If deployed from template, an agent in the provisioning + # environment handles networking configuration. Not cloud-init. + return {'config': 'disabled', 'version': 1} + if self._network_config is None: + if self.network_json is not None: + LOG.debug("network config provided via network_json") + self._network_config = openstack.convert_net_json( + self.network_json, known_macs=None) + else: + LOG.debug("no network configuration available.") + return self._network_config + + +def _read_system_uuid(): + uuid_path = "/sys/hypervisor/uuid" + if not os.path.isfile(uuid_path): + return None + return util.load_file(uuid_path).strip().lower() + + +def _is_xen(): + return os.path.exists("/proc/xen") + + +def _is_ibm_provisioning(): + return os.path.exists("/root/provisioningConfiguration.cfg") + + +def get_ibm_platform(): + """Return a tuple (Platform, path) + + If this is Not IBM cloud, then the return value is (None, None). + An instance in provisioning mode is considered running on IBM cloud.""" + label_mdata = "METADATA" + label_cfg2 = "CONFIG-2" + not_found = (None, None) + + if not _is_xen(): + return not_found + + # fslabels contains only the first entry with a given label. + fslabels = {} + try: + devs = util.blkid() + except util.ProcessExecutionError as e: + LOG.warning("Failed to run blkid: %s", e) + return (None, None) + + for dev in sorted(devs.keys()): + data = devs[dev] + label = data.get("LABEL", "").upper() + uuid = data.get("UUID", "").upper() + if label not in (label_mdata, label_cfg2): + continue + if label in fslabels: + LOG.warning("Duplicate fslabel '%s'. existing=%s current=%s", + label, fslabels[label], data) + continue + if label == label_cfg2 and uuid != IBM_CONFIG_UUID: + LOG.debug("Skipping %s with LABEL=%s due to uuid != %s: %s", + dev, label, uuid, data) + continue + fslabels[label] = data + + metadata_path = fslabels.get(label_mdata, {}).get('DEVNAME') + cfg2_path = fslabels.get(label_cfg2, {}).get('DEVNAME') + + if cfg2_path: + return (Platforms.OS_CODE, cfg2_path) + elif metadata_path: + if _is_ibm_provisioning(): + return (Platforms.TEMPLATE_PROVISIONING_METADATA, metadata_path) + else: + return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path) + elif _is_ibm_provisioning(): + return (Platforms.TEMPLATE_PROVISIONING_NODATA, None) + return not_found + + +def read_md(): + """Read data from IBM Cloud. + + @return: None if not running on IBM Cloud. + dictionary with guaranteed fields: metadata, version + and optional fields: userdata, vendordata, networkdata. 
+ Also includes the system uuid from /sys/hypervisor/uuid.""" + platform, path = get_ibm_platform() + if platform is None: + LOG.debug("This is not an IBMCloud platform.") + return None + elif platform in PROVISIONING: + LOG.debug("Cloud-init is disabled during provisioning: %s.", + platform) + return None + + ret = {'platform': platform, 'source': path, + 'system-uuid': _read_system_uuid()} + + try: + if os.path.isdir(path): + results = metadata_from_dir(path) + else: + results = util.mount_cb(path, metadata_from_dir) + except BrokenMetadata as e: + raise RuntimeError( + "Failed reading IBM config disk (platform=%s path=%s): %s" % + (platform, path, e)) + + ret.update(results) + return ret + + +class BrokenMetadata(IOError): + pass + + +def metadata_from_dir(source_dir): + """Walk source_dir extracting standardized metadata. + + Certain metadata keys are renamed to present a standardized set of metadata + keys. + + This function has a lot in common with ConfigDriveReader.read_v2 but + there are a number of inconsistencies, such key renames and as only + presenting a 'latest' version which make it an unlikely candidate to share + code. + + @return: Dict containing translated metadata, userdata, vendordata, + networkdata as present. + """ + + def opath(fname): + return os.path.join("openstack", "latest", fname) + + def load_json_bytes(blob): + return json.loads(blob.decode('utf-8')) + + files = [ + # tuples of (results_name, path, translator) + ('metadata_raw', opath('meta_data.json'), load_json_bytes), + ('userdata', opath('user_data'), None), + ('vendordata', opath('vendor_data.json'), load_json_bytes), + ('networkdata', opath('network_data.json'), load_json_bytes), + ] + + results = {} + for (name, path, transl) in files: + fpath = os.path.join(source_dir, path) + raw = None + try: + raw = util.load_file(fpath, decode=False) + except IOError as e: + LOG.debug("Failed reading path '%s': %s", fpath, e) + + if raw is None or transl is None: + data = raw + else: + try: + data = transl(raw) + except Exception as e: + raise BrokenMetadata("Failed decoding %s: %s" % (path, e)) + + results[name] = data + + if results.get('metadata_raw') is None: + raise BrokenMetadata( + "%s missing required file 'meta_data.json'" % source_dir) + + results['metadata'] = {} + + md_raw = results['metadata_raw'] + md = results['metadata'] + if 'random_seed' in md_raw: + try: + md['random_seed'] = base64.b64decode(md_raw['random_seed']) + except (ValueError, TypeError) as e: + raise BrokenMetadata( + "Badly formatted metadata random_seed entry: %s" % e) + + renames = ( + ('public_keys', 'public-keys'), ('hostname', 'local-hostname'), + ('uuid', 'instance-id')) + for mdname, newname in renames: + if mdname in md_raw: + md[newname] = md_raw[mdname] + + return results + + +# Used to match classes to dependencies +datasources = [ + (DataSourceIBMCloud, (sources.DEP_FILESYSTEM,)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='Query IBM Cloud Metadata') + args = parser.parse_args() + data = read_md() + print(util.json_dumps(data)) + +# vi: ts=4 expandtab diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index d30643dc..3f37dbb6 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -3,6 +3,7 @@ """Tests for cloudinit.util""" import logging +from textwrap 
import dedent import cloudinit.util as util @@ -140,4 +141,75 @@ class TestGetHostnameFqdn(CiTestCase): [{'fqdn': True, 'metadata_only': True}, {'metadata_only': True}], mycloud.calls) + +class TestBlkid(CiTestCase): + ids = { + "id01": "1111-1111", + "id02": "22222222-2222", + "id03": "33333333-3333", + "id04": "44444444-4444", + "id05": "55555555-5555-5555-5555-555555555555", + "id06": "66666666-6666-6666-6666-666666666666", + "id07": "52894610484658920398", + "id08": "86753098675309867530", + "id09": "99999999-9999-9999-9999-999999999999", + } + + blkid_out = dedent("""\ + /dev/loop0: TYPE="squashfs" + /dev/loop1: TYPE="squashfs" + /dev/loop2: TYPE="squashfs" + /dev/loop3: TYPE="squashfs" + /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}" + /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}" + /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}" + /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """ + """TYPE="zfs_member" PARTUUID="{id09}" + /dev/loop4: TYPE="squashfs" + """) + + maxDiff = None + + def _get_expected(self): + return ({ + "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"}, + "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"}, + "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"}, + "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"}, + "/dev/loop4": {"DEVNAME": "/dev/loop4", "TYPE": "squashfs"}, + "/dev/sda1": {"DEVNAME": "/dev/sda1", "TYPE": "vfat", + "UUID": self.ids["id01"], + "PARTUUID": self.ids["id02"]}, + "/dev/sda2": {"DEVNAME": "/dev/sda2", "TYPE": "ext4", + "UUID": self.ids["id03"], + "PARTUUID": self.ids["id04"]}, + "/dev/sda3": {"DEVNAME": "/dev/sda3", "TYPE": "ext4", + "UUID": self.ids["id05"], + "PARTUUID": self.ids["id06"]}, + "/dev/sda4": {"DEVNAME": "/dev/sda4", "TYPE": "zfs_member", + "LABEL": "default", + "UUID": self.ids["id07"], + "UUID_SUB": self.ids["id08"], + "PARTUUID": self.ids["id09"]}, + }) + + @mock.patch("cloudinit.util.subp") + def test_functional_blkid(self, m_subp): + m_subp.return_value = ( + self.blkid_out.format(**self.ids), "") + self.assertEqual(self._get_expected(), util.blkid()) + m_subp.assert_called_with(["blkid", "-o", "full"], capture=True, + decode="replace") + + @mock.patch("cloudinit.util.subp") + def test_blkid_no_cache_uses_no_cache(self, m_subp): + """blkid should turn off cache if disable_cache is true.""" + m_subp.return_value = ( + self.blkid_out.format(**self.ids), "") + self.assertEqual(self._get_expected(), + util.blkid(disable_cache=True)) + m_subp.assert_called_with(["blkid", "-o", "full", "-c", "/dev/null"], + capture=True, decode="replace") + + # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index cae8b196..fb4ee5fe 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1237,6 +1237,37 @@ def find_devs_with(criteria=None, oformat='device', return entries +def blkid(devs=None, disable_cache=False): + """Get all device tags details from blkid. + + @param devs: Optional list of device paths you wish to query. + @param disable_cache: Bool, set True to start with clean cache. + + @return: Dict of key value pairs of info for the device. + """ + if devs is None: + devs = [] + else: + devs = list(devs) + + cmd = ['blkid', '-o', 'full'] + if disable_cache: + cmd.extend(['-c', '/dev/null']) + cmd.extend(devs) + + # we have to decode with 'replace' as shelx.split (called by + # load_shell_content) can't take bytes. So this is potentially + # lossy of non-utf-8 chars in blkid output. 
+ out, _ = subp(cmd, capture=True, decode="replace") + ret = {} + for line in out.splitlines(): + dev, _, data = line.partition(":") + ret[dev] = load_shell_content(data) + ret[dev]["DEVNAME"] = dev + + return ret + + def peek_file(fname, max_bytes): LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes) with open(fname, 'rb') as ifh: diff --git a/tests/unittests/test_datasource/test_ibmcloud.py b/tests/unittests/test_datasource/test_ibmcloud.py new file mode 100644 index 00000000..621cfe49 --- /dev/null +++ b/tests/unittests/test_datasource/test_ibmcloud.py @@ -0,0 +1,262 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.sources import DataSourceIBMCloud as ibm +from cloudinit.tests import helpers as test_helpers + +import base64 +import copy +import json +import mock +from textwrap import dedent + +D_PATH = "cloudinit.sources.DataSourceIBMCloud." + + +class TestIBMCloud(test_helpers.CiTestCase): + """Test the datasource.""" + def setUp(self): + super(TestIBMCloud, self).setUp() + pass + + +@mock.patch(D_PATH + "_is_xen", return_value=True) +@mock.patch(D_PATH + "_is_ibm_provisioning") +@mock.patch(D_PATH + "util.blkid") +class TestGetIBMPlatform(test_helpers.CiTestCase): + """Test the get_ibm_platform helper.""" + + blkid_base = { + "/dev/xvda1": { + "DEVNAME": "/dev/xvda1", "LABEL": "cloudimg-bootfs", + "TYPE": "ext3"}, + "/dev/xvda2": { + "DEVNAME": "/dev/xvda2", "LABEL": "cloudimg-rootfs", + "TYPE": "ext4"}, + } + + blkid_metadata_disk = { + "/dev/xvdh1": { + "DEVNAME": "/dev/xvdh1", "LABEL": "METADATA", "TYPE": "vfat", + "SEC_TYPE": "msdos", "UUID": "681B-8C5D", + "PARTUUID": "3d631e09-01"}, + } + + blkid_oscode_disk = { + "/dev/xvdh": { + "DEVNAME": "/dev/xvdh", "LABEL": "config-2", "TYPE": "vfat", + "SEC_TYPE": "msdos", "UUID": ibm.IBM_CONFIG_UUID} + } + + def setUp(self): + self.blkid_metadata = copy.deepcopy(self.blkid_base) + self.blkid_metadata.update(copy.deepcopy(self.blkid_metadata_disk)) + + self.blkid_oscode = copy.deepcopy(self.blkid_base) + self.blkid_oscode.update(copy.deepcopy(self.blkid_oscode_disk)) + + def test_id_template_live_metadata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_LIVE_METADATA.""" + m_blkid.return_value = self.blkid_metadata + m_is_prov.return_value = False + self.assertEqual( + (ibm.Platforms.TEMPLATE_LIVE_METADATA, "/dev/xvdh1"), + ibm.get_ibm_platform()) + + def test_id_template_prov_metadata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_PROVISIONING_METADATA.""" + m_blkid.return_value = self.blkid_metadata + m_is_prov.return_value = True + self.assertEqual( + (ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh1"), + ibm.get_ibm_platform()) + + def test_id_template_prov_nodata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_PROVISIONING_NODATA.""" + m_blkid.return_value = self.blkid_base + m_is_prov.return_value = True + self.assertEqual( + (ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None), + ibm.get_ibm_platform()) + + def test_id_os_code(self, m_blkid, m_is_prov, _m_xen): + """Identify OS_CODE.""" + m_blkid.return_value = self.blkid_oscode + m_is_prov.return_value = False + self.assertEqual((ibm.Platforms.OS_CODE, "/dev/xvdh"), + ibm.get_ibm_platform()) + + def test_id_os_code_must_match_uuid(self, m_blkid, m_is_prov, _m_xen): + """Test against false positive on openstack with non-ibm UUID.""" + blkid = self.blkid_oscode + blkid["/dev/xvdh"]["UUID"] = "9999-9999" + m_blkid.return_value = blkid + m_is_prov.return_value = False + 
self.assertEqual((None, None), ibm.get_ibm_platform()) + + +@mock.patch(D_PATH + "_read_system_uuid", return_value=None) +@mock.patch(D_PATH + "get_ibm_platform") +class TestReadMD(test_helpers.CiTestCase): + """Test the read_datasource helper.""" + + template_md = { + "files": [], + "network_config": {"content_path": "/content/interfaces"}, + "hostname": "ci-fond-ram", + "name": "ci-fond-ram", + "domain": "testing.ci.cloud-init.org", + "meta": {"dsmode": "net"}, + "uuid": "8e636730-9f5d-c4a5-327c-d7123c46e82f", + "public_keys": {"1091307": "ssh-rsa AAAAB3NzaC1...Hw== ci-pubkey"}, + } + + oscode_md = { + "hostname": "ci-grand-gannet.testing.ci.cloud-init.org", + "name": "ci-grand-gannet", + "uuid": "2f266908-8e6c-4818-9b5c-42e9cc66a785", + "random_seed": "bm90LXJhbmRvbQo=", + "crypt_key": "ssh-rsa AAAAB3NzaC1yc2..n6z/", + "configuration_token": "eyJhbGciOi..M3ZA", + "public_keys": {"1091307": "ssh-rsa AAAAB3N..Hw== ci-pubkey"}, + } + + content_interfaces = dedent("""\ + auto lo + iface lo inet loopback + + auto eth0 + allow-hotplug eth0 + iface eth0 inet static + address 10.82.43.5 + netmask 255.255.255.192 + """) + + userdata = b"#!/bin/sh\necho hi mom\n" + # meta.js file gets json encoded userdata as a list. + meta_js = '["#!/bin/sh\necho hi mom\n"]' + vendor_data = { + "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"} + + network_data = { + "links": [ + {"id": "interface_29402281", "name": "eth0", "mtu": None, + "type": "phy", "ethernet_mac_address": "06:00:f1:bd:da:25"}, + {"id": "interface_29402279", "name": "eth1", "mtu": None, + "type": "phy", "ethernet_mac_address": "06:98:5e:d0:7f:86"} + ], + "networks": [ + {"id": "network_109887563", "link": "interface_29402281", + "type": "ipv4", "ip_address": "10.82.43.2", + "netmask": "255.255.255.192", + "routes": [ + {"network": "10.0.0.0", "netmask": "255.0.0.0", + "gateway": "10.82.43.1"}, + {"network": "161.26.0.0", "netmask": "255.255.0.0", + "gateway": "10.82.43.1"}]}, + {"id": "network_109887551", "link": "interface_29402279", + "type": "ipv4", "ip_address": "108.168.194.252", + "netmask": "255.255.255.248", + "routes": [ + {"network": "0.0.0.0", "netmask": "0.0.0.0", + "gateway": "108.168.194.249"}]} + ], + "services": [ + {"type": "dns", "address": "10.0.80.11"}, + {"type": "dns", "address": "10.0.80.12"} + ], + } + + sysuuid = '7f79ebf5-d791-43c3-a723-854e8389d59f' + + def _get_expected_metadata(self, os_md): + """return expected 'metadata' for data loaded from meta_data.json.""" + os_md = copy.deepcopy(os_md) + renames = ( + ('hostname', 'local-hostname'), + ('uuid', 'instance-id'), + ('public_keys', 'public-keys')) + ret = {} + for osname, mdname in renames: + if osname in os_md: + ret[mdname] = os_md[osname] + if 'random_seed' in os_md: + ret['random_seed'] = base64.b64decode(os_md['random_seed']) + + return ret + + def test_provisioning_md(self, m_platform, m_sysuuid): + """Provisioning env with a metadata disk should return None.""" + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh") + self.assertIsNone(ibm.read_md()) + + def test_provisioning_no_metadata(self, m_platform, m_sysuuid): + """Provisioning env with no metadata disk should return None.""" + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None) + self.assertIsNone(ibm.read_md()) + + def test_provisioning_not_ibm(self, m_platform, m_sysuuid): + """Provisioning env but not identified as IBM should return None.""" + m_platform.return_value = (None, None) + 
self.assertIsNone(ibm.read_md()) + + def test_template_live(self, m_platform, m_sysuuid): + """Template live environment should be identified.""" + tmpdir = self.tmp_dir() + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_LIVE_METADATA, tmpdir) + m_sysuuid.return_value = self.sysuuid + + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.template_md), + 'openstack/latest/user_data': self.userdata, + 'openstack/content/interfaces': self.content_interfaces, + 'meta.js': self.meta_js}) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, + ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertEqual(self.userdata, ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.template_md), + ret['metadata']) + self.assertEqual(self.sysuuid, ret['system-uuid']) + + def test_os_code_live(self, m_platform, m_sysuuid): + """Verify an os_code metadata path.""" + tmpdir = self.tmp_dir() + m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) + netdata = json.dumps(self.network_data) + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), + 'openstack/latest/user_data': self.userdata, + 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), + 'openstack/latest/network_data.json': netdata, + }) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertEqual(self.userdata, ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.oscode_md), + ret['metadata']) + + def test_os_code_live_no_userdata(self, m_platform, m_sysuuid): + """Verify os_code without user-data.""" + tmpdir = self.tmp_dir() + m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), + 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), + }) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertIsNone(ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.oscode_md), + ret['metadata']) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 85999b7a..53643989 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -9,6 +9,8 @@ from cloudinit import util from cloudinit.tests.helpers import ( CiTestCase, dir2dict, populate_dir) +from cloudinit.sources import DataSourceIBMCloud as dsibm + UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu " "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux") UNAME_PPC64EL = ("Linux diamond 4.4.0-83-generic #106-Ubuntu SMP " @@ -37,8 +39,8 @@ BLKID_UEFI_UBUNTU = [ POLICY_FOUND_ONLY = "search,found=all,maybe=none,notfound=disabled" POLICY_FOUND_OR_MAYBE = "search,found=all,maybe=all,notfound=disabled" -DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=enabled" -DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=disabled" +DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=disabled" +DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=enabled" DI_EC2_STRICT_ID_DEFAULT = "true" OVF_MATCH_STRING = 'http://schemas.dmtf.org/ovf/environment/1' @@ -64,6 +66,9 @@ P_SYS_VENDOR = "sys/class/dmi/id/sys_vendor" P_SEED_DIR = "var/lib/cloud/seed" P_DSID_CFG = "etc/cloud/ds-identify.cfg" +IBM_PROVISIONING_CHECK_PATH = 
"/root/provisioningConfiguration.cfg" +IBM_CONFIG_UUID = "9796-932E" + MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0} MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0} @@ -239,6 +244,57 @@ class TestDsIdentify(CiTestCase): self._test_ds_found('ConfigDriveUpper') return + def test_ibmcloud_template_userdata_in_provisioning(self): + """Template provisioned with user-data during provisioning stage. + + Template provisioning with user-data has METADATA disk, + datasource should return not found.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-metadata']) + data['files'] = {IBM_PROVISIONING_CHECK_PATH: 'xxx'} + return self._check_via_dict(data, RC_NOT_FOUND) + + def test_ibmcloud_template_userdata(self): + """Template provisioned with user-data first boot. + + Template provisioning with user-data has METADATA disk. + datasource should return found.""" + self._test_ds_found('IBMCloud-metadata') + + def test_ibmcloud_template_no_userdata_in_provisioning(self): + """Template provisioned with no user-data during provisioning. + + no disks attached. Datasource should return not found.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-nodisks']) + data['files'] = {IBM_PROVISIONING_CHECK_PATH: 'xxx'} + return self._check_via_dict(data, RC_NOT_FOUND) + + def test_ibmcloud_template_no_userdata(self): + """Template provisioned with no user-data first boot. + + no disks attached. Datasource should return found.""" + self._check_via_dict(VALID_CFG['IBMCloud-nodisks'], RC_NOT_FOUND) + + def test_ibmcloud_os_code(self): + """Launched by os code always has config-2 disk.""" + self._test_ds_found('IBMCloud-config-2') + + def test_ibmcloud_os_code_different_uuid(self): + """IBM cloud config-2 disks must be explicit match on UUID. + + If the UUID is not 9796-932E then we actually expect ConfigDrive.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-config-2']) + offset = None + for m, d in enumerate(data['mocks']): + if d.get('name') == "blkid": + offset = m + break + if not offset: + raise ValueError("Expected to find 'blkid' mock, but did not.") + data['mocks'][offset]['out'] = d['out'].replace(dsibm.IBM_CONFIG_UUID, + "DEAD-BEEF") + self._check_via_dict( + data, rc=RC_FOUND, dslist=['ConfigDrive', DS_NONE]) + def test_policy_disabled(self): """A Builtin policy of 'disabled' should return not found. 
@@ -452,7 +508,7 @@ VALID_CFG = { }, 'Ec2-xen': { 'ds': 'Ec2', - 'mocks': [{'name': 'detect_virt', 'RET': 'xen', 'ret': 0}], + 'mocks': [MOCK_VIRT_IS_XEN], 'files': { 'sys/hypervisor/uuid': 'ec2c6e2f-5fac-4fc7-9c82-74127ec14bbb\n' }, @@ -579,6 +635,48 @@ VALID_CFG = { 'ds': 'Hetzner', 'files': {P_SYS_VENDOR: 'Hetzner\n'}, }, + 'IBMCloud-metadata': { + 'ds': 'IBMCloud', + 'mocks': [ + MOCK_VIRT_IS_XEN, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'xvda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, + {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'METADATA'}]), + }, + ], + }, + 'IBMCloud-config-2': { + 'ds': 'IBMCloud', + 'mocks': [ + MOCK_VIRT_IS_XEN, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'xvda1', 'TYPE': 'ext3', 'PARTUUID': uuid4(), + 'UUID': uuid4(), 'LABEL': 'cloudimg-bootfs'}, + {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'config-2', + 'UUID': dsibm.IBM_CONFIG_UUID}, + {'DEVNAME': 'xvda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4(), + 'UUID': uuid4()}, + ]), + }, + ], + }, + 'IBMCloud-nodisks': { + 'ds': 'IBMCloud', + 'mocks': [ + MOCK_VIRT_IS_XEN, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'xvda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}]), + }, + ], + }, } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index e2552c8b..9a2db5c4 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -92,6 +92,7 @@ DI_DMI_SYS_VENDOR="" DI_DMI_PRODUCT_SERIAL="" DI_DMI_PRODUCT_UUID="" DI_FS_LABELS="" +DI_FS_UUIDS="" DI_ISO9660_DEVS="" DI_KERNEL_CMDLINE="" DI_VIRT="" @@ -114,7 +115,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner" +OVF SmartOS Scaleway Hetzner IBMCloud" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -123,6 +124,8 @@ DI_ON_NOTFOUND="" DI_EC2_STRICT_ID_DEFAULT="true" +_IS_IBM_CLOUD="" + error() { set -- "ERROR:" "$@"; debug 0 "$@" @@ -196,7 +199,7 @@ read_fs_info() { return fi local oifs="$IFS" line="" delim="," - local ret=0 out="" labels="" dev="" label="" ftype="" isodevs="" + local ret=0 out="" labels="" dev="" label="" ftype="" isodevs="" uuids="" out=$(blkid -c /dev/null -o export) || { ret=$? error "failed running [$ret]: blkid -c /dev/null -o export" @@ -219,12 +222,14 @@ read_fs_info() { LABEL=*) label="${line#LABEL=}"; labels="${labels}${line#LABEL=}${delim}";; TYPE=*) ftype=${line#TYPE=};; + UUID=*) uuids="${uuids}${line#UUID=}$delim";; esac done [ -n "$dev" -a "$ftype" = "iso9660" ] && isodevs="${isodevs} ${dev}=$label" DI_FS_LABELS="${labels%${delim}}" + DI_FS_UUIDS="${uuids%${delim}}" DI_ISO9660_DEVS="${isodevs# }" } @@ -437,14 +442,25 @@ dmi_sys_vendor_is() { [ "${DI_DMI_SYS_VENDOR}" = "$1" ] } -has_fs_with_label() { - local label="$1" - case ",${DI_FS_LABELS}," in - *,$label,*) return 0;; +has_fs_with_uuid() { + case ",${DI_FS_UUIDS}," in + *,$1,*) return 0;; esac return 1 } +has_fs_with_label() { + # has_fs_with_label(label1[ ,label2 ..]) + # return 0 if a there is a filesystem that matches any of the labels. 
+ local label="" + for label in "$@"; do + case ",${DI_FS_LABELS}," in + *,$label,*) return 0;; + esac + done + return 1 +} + nocase_equal() { # nocase_equal(a, b) # return 0 if case insenstive comparision a.lower() == b.lower() @@ -583,6 +599,8 @@ dscheck_NoCloud() { case " ${DI_DMI_PRODUCT_SERIAL} " in *\ ds=nocloud*) return ${DS_FOUND};; esac + + is_ibm_cloud && return ${DS_NOT_FOUND} for d in nocloud nocloud-net; do check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} @@ -594,9 +612,8 @@ dscheck_NoCloud() { } check_configdrive_v2() { - if has_fs_with_label "config-2"; then - return ${DS_FOUND} - elif has_fs_with_label "CONFIG-2"; then + is_ibm_cloud && return ${DS_NOT_FOUND} + if has_fs_with_label CONFIG-2 config-2; then return ${DS_FOUND} fi # look in /config-drive /seed/config_drive for a directory @@ -988,6 +1005,36 @@ dscheck_Hetzner() { return ${DS_NOT_FOUND} } +is_ibm_provisioning() { + [ -f "${PATH_ROOT}/root/provisioningConfiguration.cfg" ] +} + +is_ibm_cloud() { + cached "${_IS_IBM_CLOUD}" && return ${_IS_IBM_CLOUD} + local ret=1 + if [ "$DI_VIRT" = "xen" ]; then + if is_ibm_provisioning; then + ret=0 + elif has_fs_with_label METADATA metadata; then + ret=0 + elif has_fs_with_uuid 9796-932E && + has_fs_with_label CONFIG-2 config-2; then + ret=0 + fi + fi + _IS_IBM_CLOUD=$ret + return $ret +} + +dscheck_IBMCloud() { + if is_ibm_provisioning; then + debug 1 "cloud-init disabled during provisioning on IBMCloud" + return ${DS_NOT_FOUND} + fi + is_ibm_cloud && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + collect_info() { read_virt read_pid1_product_name -- cgit v1.2.3