From e55ff8f8ea9abeb7c406b2eec3e91aad8fee6f64 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 11 Jan 2017 16:20:10 -0500 Subject: validate-yaml: use python rather than explicitly python3 The change here is to use '/usr/bin/env python' in validate-yaml.py as all other tools/*.py do. Additionally, change the Makefile to invoke validate-yaml.py with the python that it has selected for other things (PYVER). --- tools/validate-yaml.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py index d8bbcfcb..a57ea847 100755 --- a/tools/validate-yaml.py +++ b/tools/validate-yaml.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/env python """Try to read a YAML file and report any errors. """ -- cgit v1.2.3 From a3376d45c83e90150d8de79a2b31282a7d760bd7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 20 Jan 2017 09:36:51 -0500 Subject: build: fix running Make on a branch with tags other than master running 'make' on a git branch other than master would fail with complaint that the tools/read-version reported a different version than the code. Change to only consider tags starting with 0-9 in read-version. --- Makefile | 7 ++++--- tools/make-tarball | 2 +- tools/read-version | 2 +- 3 files changed, 6 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/Makefile b/Makefile index ed631cf7..18ec5680 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,8 @@ ifeq ($(distro),) distro = redhat endif -READ_VERSION=$(shell $(PYVER) $(CWD)/tools/read-version) +READ_VERSION=$(shell $(PYVER) $(CWD)/tools/read-version || \ + echo read-version-failed) CODE_VERSION=$(shell $(PYVER) -c "from cloudinit import version; print(version.version_string())") @@ -62,8 +63,8 @@ test: $(unittests) check_version: @if [ "$(READ_VERSION)" != "$(CODE_VERSION)" ]; then \ - echo "Error: read-version version $(READ_VERSION)" \ - "not equal to code version $(CODE_VERSION)"; exit 2; \ + echo "Error: read-version version '$(READ_VERSION)'" \ + "not equal to code version '$(CODE_VERSION)'"; exit 2; \ else true; fi clean_pyc: diff --git a/tools/make-tarball b/tools/make-tarball index c150dd2f..91c45624 100755 --- a/tools/make-tarball +++ b/tools/make-tarball @@ -35,7 +35,7 @@ while [ $# -ne 0 ]; do done rev=${1:-HEAD} -version=$(git describe ${long_opt} $rev) +version=$(git describe "--match=[0-9]*" ${long_opt} $rev) archive_base="cloud-init-$version" if [ -z "$output" ]; then diff --git a/tools/read-version b/tools/read-version index 3b30b497..ddb28383 100755 --- a/tools/read-version +++ b/tools/read-version @@ -56,7 +56,7 @@ if os.path.isdir(os.path.join(_tdir, ".git")) and which("git"): flags = [] if use_tags: flags = ['--tags'] - cmd = ['git', 'describe'] + flags + cmd = ['git', 'describe', '--match=[0-9]*'] + flags version = tiny_p(cmd).strip() -- cgit v1.2.3 From dc6e7b49bac8b87a38fe57ee621177a8177fa2c0 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 25 Jan 2017 14:55:38 -0500 Subject: tools/mock-meta: support python2 or python3 and ipv6 in both. Fix mock-meta to work with python2 or python3. Additionally, it will now listen to ipv6 connections, where previously it would only work with ipv4. 
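As a rough, self-contained illustration of the two techniques this change combines (not code taken from the patch below), the python2/python3 compatibility imports and an IPv6-capable HTTPServer subclass look roughly like this; the handler and port are purely illustrative:

    import socket

    try:  # python2 module names
        from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
        import httplib as hclient
    except ImportError:  # python3 module names
        from http.server import HTTPServer, BaseHTTPRequestHandler
        from http import client as hclient

    class HTTPServerV6(HTTPServer):
        # Binding an AF_INET6 socket to '::' also accepts IPv4 connections
        # on typical Linux configurations (net.ipv6.bindv6only=0).
        address_family = socket.AF_INET6

    class EchoHandler(BaseHTTPRequestHandler):
        def do_GET(self):
            body = 'ok\n'
            self.send_response(hclient.OK)
            self.send_header("Content-Type", "text/plain")
            self.send_header("Content-Length", len(body))
            self.end_headers()
            # python3 sockets want bytes; encode() is harmless on python2 str
            self.wfile.write(body.encode())

    if __name__ == '__main__':
        HTTPServerV6(('::', 8080), EchoHandler).serve_forever()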
--- tools/mock-meta.py | 32 +++++++++++++++++++++----------- 1 file changed, 21 insertions(+), 11 deletions(-) (limited to 'tools') diff --git a/tools/mock-meta.py b/tools/mock-meta.py index d74f9e31..a0d99441 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -18,10 +18,10 @@ Then: """ import functools -import httplib import json import logging import os +import socket import random import string import sys @@ -29,7 +29,13 @@ import yaml from optparse import OptionParser -from BaseHTTPServer import (HTTPServer, BaseHTTPRequestHandler) +try: + from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler + import httplib as hclient +except ImportError: + from http.server import HTTPServer, BaseHTTPRequestHandler + from http import client as hclient + log = logging.getLogger('meta-server') @@ -183,6 +189,10 @@ def get_ssh_keys(): return keys +class HTTPServerV6(HTTPServer): + address_family = socket.AF_INET6 + + class MetaDataHandler(object): def __init__(self, opts): @@ -250,7 +260,7 @@ class MetaDataHandler(object): key_id = int(mybe_key) key_name = key_ids[key_id] except: - raise WebException(httplib.BAD_REQUEST, + raise WebException(hclient.BAD_REQUEST, "Unknown key id %r" % mybe_key) # Extract the possible sub-params result = traverse(nparams[1:], { @@ -342,13 +352,13 @@ class Ec2Handler(BaseHTTPRequestHandler): return self._get_versions date = segments[0].strip().lower() if date not in self._get_versions(): - raise WebException(httplib.BAD_REQUEST, + raise WebException(hclient.BAD_REQUEST, "Unknown version format %r" % date) if len(segments) < 2: - raise WebException(httplib.BAD_REQUEST, "No action provided") + raise WebException(hclient.BAD_REQUEST, "No action provided") look_name = segments[1].lower() if look_name not in func_mapping: - raise WebException(httplib.BAD_REQUEST, + raise WebException(hclient.BAD_REQUEST, "Unknown requested data %r" % look_name) base_func = func_mapping[look_name] who = self.address_string() @@ -371,16 +381,16 @@ class Ec2Handler(BaseHTTPRequestHandler): data = func() if not data: data = '' - self.send_response(httplib.OK) + self.send_response(hclient.OK) self.send_header("Content-Type", "binary/octet-stream") self.send_header("Content-Length", len(data)) log.info("Sending data (len=%s):\n%s", len(data), format_text(data)) self.end_headers() - self.wfile.write(data) + self.wfile.write(data.encode()) except RuntimeError as e: log.exception("Error somewhere in the server.") - self.send_error(httplib.INTERNAL_SERVER_ERROR, message=str(e)) + self.send_error(hclient.INTERNAL_SERVER_ERROR, message=str(e)) except WebException as e: code = e.code log.exception(str(e)) @@ -408,7 +418,7 @@ def extract_opts(): help=("port from which to serve traffic" " (default: %default)")) parser.add_option("-a", "--addr", dest="address", action="store", type=str, - default='0.0.0.0', metavar="ADDRESS", + default='::', metavar="ADDRESS", help=("address from which to serve traffic" " (default: %default)")) parser.add_option("-f", '--user-data-file', dest='user_data_file', @@ -444,7 +454,7 @@ def run_server(): setup_fetchers(opts) log.info("CLI opts: %s", opts) server_address = (opts['address'], opts['port']) - server = HTTPServer(server_address, Ec2Handler) + server = HTTPServerV6(server_address, Ec2Handler) sa = server.socket.getsockname() log.info("Serving ec2 metadata on %s using port %s ...", sa[0], sa[1]) server.serve_forever() -- cgit v1.2.3 From 9698b0ded3d7e72f54513f248d8da41e08472f68 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Sat, 4 Feb 2017 02:24:55 +0000 
Subject: Add tools/ds-identify to identify datasources available. ds-identify is run here from the generator. If ds-identify does not see any datasources, it can completely disable cloud-init. The big value in this is that if there is no datasource, no python will ever be loaded, and cloud-init will be disabled.o The default policy being added here is: search,found=all,maybe=all,notfound=disabled That means: - enable (in 'datasource_list') all sources that are found. - if none are found, enable all 'maybe'. - if no maybe are found, then disable cloud-init. On platforms without DMI (everything except for aarch64 and x86), the default 'notfound' setting is 'enabled'. This is because many of the detection mechanisms rely on dmi data, which is present only on x86 and aarch64. --- cloudinit/settings.py | 2 + setup.py | 3 +- systemd/cloud-init-generator | 39 +- tools/ds-identify | 1015 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1056 insertions(+), 3 deletions(-) create mode 100755 tools/ds-identify (limited to 'tools') diff --git a/cloudinit/settings.py b/cloudinit/settings.py index b1fdd31f..692ff5e5 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -14,6 +14,8 @@ CFG_ENV_NAME = "CLOUD_CFG" # This is expected to be a yaml formatted file CLOUD_CONFIG = '/etc/cloud/cloud.cfg' +RUN_CLOUD_CONFIG = '/run/cloud-init/cloud.cfg' + # What u get if no config is provided CFG_BUILTIN = { 'datasource_list': [ diff --git a/setup.py b/setup.py index 04036078..e6693c90 100755 --- a/setup.py +++ b/setup.py @@ -168,7 +168,8 @@ else: (ETC + '/cloud/templates', glob('templates/*')), (ETC + '/NetworkManager/dispatcher.d/', ['tools/hook-network-manager']), (ETC + '/dhcp/dhclient-exit-hooks.d/', ['tools/hook-dhclient']), - (USR_LIB_EXEC + '/cloud-init', ['tools/uncloud-init', + (USR_LIB_EXEC + '/cloud-init', ['tools/ds-identify', + 'tools/uncloud-init', 'tools/write-ssh-key-fingerprints']), (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]), (USR + '/share/doc/cloud-init/examples', diff --git a/systemd/cloud-init-generator b/systemd/cloud-init-generator index fedb6309..bd9f2678 100755 --- a/systemd/cloud-init-generator +++ b/systemd/cloud-init-generator @@ -6,6 +6,8 @@ DEBUG_LEVEL=1 LOG_D="/run/cloud-init" ENABLE="enabled" DISABLE="disabled" +FOUND="found" +NOTFOUND="notfound" RUN_ENABLED_FILE="$LOG_D/$ENABLE" CLOUD_SYSTEM_TARGET="/lib/systemd/system/cloud-init.target" CLOUD_TARGET_NAME="cloud-init.target" @@ -74,10 +76,30 @@ default() { _RET="$ENABLE" } +check_for_datasource() { + local ds_rc="" dsidentify="/usr/lib/cloud-init/ds-identify" + if [ ! -x "$dsidentify" ]; then + debug 1 "no ds-identify in $dsidentify. _RET=$FOUND" + return 0 + fi + $dsidentify + ds_rc=$? 
+ debug 1 "ds-identify rc=$ds_rc" + if [ "$ds_rc" = "0" ]; then + _RET="$FOUND" + debug 1 "ds-identify _RET=$_RET" + return 0 + fi + _RET="$NOTFOUND" + debug 1 "ds-identify _RET=$_RET" + return 1 +} + main() { local normal_d="$1" early_d="$2" late_d="$3" local target_name="multi-user.target" gen_d="$early_d" local link_path="$gen_d/${target_name}.wants/${CLOUD_TARGET_NAME}" + local ds="$NOTFOUND" debug 1 "$0 normal=$normal_d early=$early_d late=$late_d" debug 2 "$0 $*" @@ -93,7 +115,20 @@ main() { debug 0 "search $search returned $ret" fi done - + + # enable AND ds=found == enable + # enable AND ds=notfound == disable + # disable || == disabled + if [ "$result" = "$ENABLE" ]; then + debug 1 "checking for datasource" + check_for_datasource + ds=$_RET + if [ "$ds" = "$NOTFOUND" ]; then + debug 1 "cloud-init is enabled but no datasource found, disabling" + result="$DISABLE" + fi + fi + if [ "$result" = "$ENABLE" ]; then if [ -e "$link_path" ]; then debug 1 "already enabled: no change needed" @@ -124,7 +159,7 @@ main() { rm -f "$RUN_ENABLED_FILE" fi else - debug 0 "unexpected result '$result'" + debug 0 "unexpected result '$result' 'ds=$ds'" ret=3 fi return $ret diff --git a/tools/ds-identify b/tools/ds-identify new file mode 100755 index 00000000..203eac0d --- /dev/null +++ b/tools/ds-identify @@ -0,0 +1,1015 @@ +#!/bin/sh +# +# ds-identify is configured via /etc/cloud/ds-identify.cfg +# or on the kernel command line. It takes primarily 2 inputs: +# datasource: can specify the datasource that should be used. +# kernel command line option: ci.datasource= +# +# policy: a string that indicates how ds-identify should operate. +# kernel command line option: ci.di.policy= +# default setting is: +# search,found=all,maybe=all,notfound=disable + +# report: write config to /run/cloud-init/cloud.cfg.report (instead of +# /run/cloud-init/cloud.cfg, which effectively makes this dry-run). +# enable: do nothing +# ds-identify writes no config and just exits success +# the caller (cloud-init-generator) then enables cloud-init to run +# just without any aid from ds-identify. +# disable: disable cloud-init +# +# [report,]found=value,maybe=value,notfound=value +# found: (default=first) +# first: use the first found do no further checking +# all: enable all DS_FOUND +# +# maybe: (default=all) +# if nothing returned 'found', then how to handle maybe. +# no network sources are allowed to return 'maybe'. 
+# all: enable all DS_MAYBE +# none: ignore any DS_MAYBE +# +# notfound: (default=disable) +# disable: disable cloud-init +# enable: enable cloud-init +# +# +# zesty: +# policy: found=first,maybe=all,none=disable +# xenial: +# policy: found=all,maybe=all,none=enable +# and then at a later date + + +set -u +set -f +UNAVAILABLE="unavailable" +CR=" +" +ERROR="error" +DI_ENABLED="enabled" +DI_DISABLED="disabled" + +DI_DEBUG_LEVEL="${DEBUG_LEVEL:-1}" + +PATH_ROOT=${PATH_ROOT:-""} +PATH_RUN=${PATH_RUN:-"${PATH_ROOT}/run"} +PATH_SYS_CLASS_DMI_ID=${PATH_SYS_CLASS_DMI_ID:-${PATH_ROOT}/sys/class/dmi/id} +PATH_SYS_HYPERVISOR=${PATH_SYS_HYPERVISOR:-${PATH_ROOT}/sys/hypervisor} +PATH_SYS_CLASS_BLOCK=${PATH_SYS_CLASS_BLOCK:-${PATH_ROOT}/sys/class/block} +PATH_DEV_DISK="${PATH_DEV_DISK:-${PATH_ROOT}/dev/disk}" +PATH_VAR_LIB_CLOUD="${PATH_VAR_LIB_CLOUD:-${PATH_ROOT}/var/lib/cloud}" +PATH_DI_CONFIG="${PATH_DI_CONFIG:-${PATH_ROOT}/etc/cloud/ds-identify.cfg}" +PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}" +PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}" +PATH_CLOUD_CONFD="${PATH_CLOUD_CONFD:-${PATH_ROOT}/etc/cloud}" +PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}" +PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg} + +DI_LOG="${DI_LOG:-${PATH_RUN_CI}/ds-identify.log}" +_DI_LOGGED="" + +# set DI_MAIN='noop' in environment to source this file with no main called. +DI_MAIN=${DI_MAIN:-main} + +DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}" +DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}" +DI_DMI_PRODUCT_NAME="" +DI_DMI_SYS_VENDOR="" +DI_DMI_PRODUCT_SERIAL="" +DI_DMI_PRODUCT_UUID="" +DI_FS_LABELS="" +DI_KERNEL_CMDLINE="" +DI_VIRT="" + +DI_UNAME_KERNEL_NAME="" +DI_UNAME_KERNEL_RELEASE="" +DI_UNAME_KERNEL_VERSION="" +DI_UNAME_MACHINE="" +DI_UNAME_NODENAME="" +DI_UNAME_OPERATING_SYSTEM="" +DI_UNAME_CMD_OUT="" + +DS_FOUND=0 +DS_NOT_FOUND=1 +DS_MAYBE=2 + +DI_DSNAME="" +# this has to match the builtin list in cloud-init, it is what will +# be searched if there is no setting found in config. +DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ +CloudSigma CloudStack DigitalOcean Ec2 OpenNebula OpenStack OVF SmartOS" +DI_DSLIST="" +DI_MODE="" +DI_REPORT="" +DI_ON_FOUND="" +DI_ON_MAYBE="" +DI_ON_NOTFOUND="" + + +error() { + set -- "ERROR:" "$@"; + debug 0 "$@" + stderr "$@" +} +warn() { + set -- "WARN:" "$@" + debug 0 "$@" + stderr "$@" +} + +stderr() { echo "$@" 1>&2; } + +debug() { + local lvl="$1" + shift + [ "$lvl" -gt "${DI_DEBUG_LEVEL}" ] && return + + if [ "$_DI_LOGGED" != "$DI_LOG" ]; then + # first time here, open file descriptor for append + case "$DI_LOG" in + stderr) :;; + ?*/*) + if [ ! -d "${DI_LOG%/*}" ]; then + mkdir -p "${DI_LOG%/*}" || { + stderr "ERROR:" "cannot write to $DI_LOG" + DI_LOG="stderr" + } + fi + esac + if [ "$DI_LOG" = "stderr" ]; then + exec 3>&2 + else + ( exec 3>>"$DI_LOG" ) && exec 3>>"$DI_LOG" || { + stderr "ERROR: failed writing to $DI_LOG. logging to stderr."; + exec 3>&2 + DI_LOG="stderr" + } + fi + _DI_LOGGED="$DI_LOG" + fi + echo "$@" 1>&3 +} + +get_dmi_field() { + local path="${PATH_SYS_CLASS_DMI_ID}/$1" + if [ ! -f "$path" ] || [ ! 
-r "$path" ]; then + _RET="$UNAVAILABLE" + return + fi + read _RET < "${path}" || _RET="$ERROR" +} + +block_dev_with_label() { + local p="${PATH_DEV_DISK}/by-label/$1" + [ -b "$p" ] || return 1 + _RET=$p + return 0 +} + +read_fs_labels() { + cached "${DI_FS_LABELS}" && return 0 + # do not rely on links in /dev/disk which might not be present yet. + # note that older blkid versions do not report DEVNAME in 'export' output. + local out="" ret=0 oifs="$IFS" line="" delim="," + local labels="" + if is_container; then + # blkid will in a container, or at least currently in lxd + # not provide useful information. + DI_FS_LABELS="$UNAVAILABLE:container" + else + out=$(blkid -c /dev/null -o export) || { + ret=$? + error "failed running [$ret]: blkid -c /dev/null -o export" + return $ret + } + IFS="$CR" + set -- $out + IFS="$oifs" + for line in "$@"; do + case "${line}" in + LABEL=*) labels="${labels}${line#LABEL=}${delim}";; + esac + done + DI_FS_LABELS="${labels%${delim}}" + fi +} + +cached() { + [ -n "$1" ] && _RET="$1" && return || return 1 +} + + +has_cdrom() { + [ -e "${PATH_ROOT}/dev/cdrom" ] +} + +read_virt() { + cached "$DI_VIRT" && return 0 + local out="" r="" virt="${UNAVAILABLE}" + if [ -d /run/systemd ]; then + out=$(systemd-detect-virt 2>&1) + r=$? + if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then + virt="$out" + fi + fi + DI_VIRT=$virt +} + +is_container() { + case "${DI_VIRT}" in + lxc|lxc-libvirt|systemd-nspawn|docker|rkt) return 0;; + *) return 1;; + esac +} + +read_kernel_cmdline() { + cached "${DI_KERNEL_CMDLINE}" && return + local cmdline="" fpath="${PATH_PROC_CMDLINE}" + if is_container; then + local p1path="${PATH_PROC_1_CMDLINE}" x="" + cmdline="${UNAVAILABLE}:container" + if [ -f "$p1path" ] && x=$(tr '\0' ' ' < "$p1path"); then + cmdline=$x + fi + elif [ -f "$fpath" ]; then + read cmdline <"$fpath" + else + cmdline="${UNAVAILABLE}:no-cmdline" + fi + DI_KERNEL_CMDLINE="$cmdline" +} + +read_dmi_sys_vendor() { + cached "${DI_DMI_SYS_VENDOR}" && return + get_dmi_field sys_vendor + DI_DMI_SYS_VENDOR="$_RET" +} + +read_dmi_product_name() { + cached "${DI_DMI_PRODUCT_NAME}" && return + get_dmi_field product_name + DI_DMI_PRODUCT_NAME="$_RET" +} + +read_dmi_product_uuid() { + cached "${DI_DMI_PRODUCT_UUID}" && return + get_dmi_field product_uuid + DI_DMI_PRODUCT_UUID="$_RET" +} + +read_dmi_product_serial() { + cached "${DI_DMI_PRODUCT_SERIAL}" && return + get_dmi_field product_serial + DI_DMI_PRODUCT_SERIAL="$_RET" +} + +read_uname_info() { + # run uname, and parse output. + # uname is tricky to parse as it outputs always in a given order + # independent of option order. kernel-version is known to have spaces. + # 1 -s kernel-name + # 2 -n nodename + # 3 -r kernel-release + # 4.. -v kernel-version(whitespace) + # N-2 -m machine + # N-1 -o operating-system + cached "${DI_UNAME_CMD_OUT}" && return + local out="${1:-}" ret=0 buf="" + if [ -z "$out" ]; then + out=$(uname -snrvmo) || { + ret=$? + error "failed reading uname with 'uname -snrvmo'" + return $ret + } + fi + set -- $out + DI_UNAME_KERNEL_NAME="$1" + DI_UNAME_NODENAME="$2" + DI_UNAME_KERNEL_RELEASE="$3" + shift 3 + while [ $# -gt 2 ]; do + buf="$buf $1" + shift + done + DI_UNAME_KERNEL_VERSION="${buf# }" + DI_UNAME_MACHINE="$1" + DI_UNAME_OPERATING_SYSTEM="$2" + DI_UNAME_CMD_OUT="$out" + return 0 +} + +parse_yaml_array() { + # parse a yaml single line array value ([1,2,3], not key: [1,2,3]). 
+ # supported with or without leading and closing brackets + # ['1'] or [1] + # '1', '2' + local val="$1" oifs="$IFS" ret="" tok="" + val=${val#[} + val=${val%]} + IFS=","; set -- $val; IFS="$oifs" + for tok in "$@"; do + trim "$tok" + unquote "$_RET" + ret="${ret} $_RET" + done + _RET="${ret# }" +} + +read_datasource_list() { + cached "$DI_DSLIST" && return + local dslist="" + # if DI_DSNAME is set directly, then avoid parsing config. + if [ -n "${DI_DSNAME}" ]; then + dslist="${DI_DSNAME}" + fi + + # LP: #1582323. cc:{'datasource_list': ['name']} + # more generically cc:[end_cc] + local cb="]" ob="[" + case "$DI_KERNEL_CMDLINE" in + *cc:*datasource_list*) + t=${DI_KERNEL_CMDLINE##*datasource_list} + t=${t%%$cb*} + t=${t##*$ob} + parse_yaml_array "$t" + dslist=${_RET} + ;; + esac + if [ -z "$dslist" ] && check_config datasource_list; then + debug 1 "$_RET_fname set datasource_list: $_RET" + parse_yaml_array "$_RET" + dslist=${_RET} + fi + if [ -z "$dslist" ]; then + dslist=${DI_DSLIST_DEFAULT} + debug 1 "no datasource_list found, using default:" $dslist + fi + DI_DSLIST=$dslist + return 0 +} + +dmi_product_name_matches() { + is_container && return 1 + case "${DI_DMI_PRODUCT_NAME}" in + $1) return 0;; + esac + return 1 +} + +dmi_product_name_is() { + is_container && return 1 + [ "${DI_DMI_PRODUCT_NAME}" = "$1" ] +} + +dmi_sys_vendor_is() { + is_container && return 1 + [ "${DI_DMI_SYS_VENDOR}" = "$1" ] +} + +has_fs_with_label() { + local label=",$1," + case "${DI_FS_LABELS}" in + *,$label,*) return 0;; + esac + return 1 +} + +nocase_equal() { + # nocase_equal(a, b) + # return 0 if case insenstive comparision a.lower() == b.lower() + # different lengths + [ "${#1}" = "${#2}" ] || return 1 + # case sensitive equal + [ "$1" = "$2" ] && return 0 + + local delim="-delim-" + out=$(echo "$1${delim}$2" | tr A-Z a-z) + [ "${out#*${delim}}" = "${out%${delim}*}" ] +} + +check_seed_dir() { + # check_seed_dir(name, [required]) + # check the seed dir /var/lib/cloud/seed/ for 'required' + # required defaults to 'meta-data' + local name="$1" + local dir="${PATH_VAR_LIB_CLOUD}/seed/$name" + [ -d "$dir" ] || return 1 + shift + if [ $# -eq 0 ]; then + set -- meta-data + fi + local f="" + for f in "$@"; do + [ -f "$dir/$f" ] || return 1 + done + return 0 +} + +probe_floppy() { + cached "${STATE_FLOPPY_PROBED}" && return "${STATE_FLOPPY_PROBED}" + local fpath=/dev/floppy + + [ -b "$fpath" ] || + { STATE_FLOPPY_PROBED=1; return 1; } + + modprobe --use-blacklist floppy >/dev/null 2>&1 || + { STATE_FLOPPY_PROBED=1; return 1; } + + udevadm settle "--exit-if-exists=$fpath" || + { STATE_FLOPPY_PROBED=1; return 1; } + + [ -b "$fpath" ] + STATE_FLOPPY_PROBED=$? + return "${STATE_FLOPPY_PROBED}" +} + + +dscheck_CloudStack() { + is_container && return ${DS_NOT_FOUND} + dmi_product_name_matches "CloudStack*" && return $DS_FOUND + return $DS_NOT_FOUND +} + +dscheck_CloudSigma() { + # http://paste.ubuntu.com/23624795/ + dmi_product_name_is "CloudSigma" && return $DS_FOUND + return $DS_NOT_FOUND +} + +check_config() { + # somewhat hackily read config for 'key' in files matching 'files' + # currently does not respect any hierarchy. + local key="$1" files="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" + if [ $# -eq 1 ]; then + files="$bp ${bp}.d/*.cfg" + else + files="$*" + fi + shift + set +f; set -- $files; set +f; + if [ "$1" = "$files" -a ! 
-f "$1" ]; then + return 1 + fi + local fname="" line="" ret="" found=0 found_fn="" + for fname in "$@"; do + [ -f "$fname" ] || continue + while read line; do + line=${line%%#*} + case "$line" in + $key:\ *|$key:) + ret=${line#*:}; + ret=${ret# }; + found=$((found+1)) + found_fn="$fname";; + esac + done <"$fname" + done + if [ $found -ne 0 ]; then + _RET="$ret" + _RET_fname="$found_fn" + return 0 + fi + return 1 +} + +dscheck_MAAS() { + is_container && return "${DS_NOT_FOUND}" + # heuristic check for ephemeral boot environment + # for maas that do not set 'ci.dsname=' in the ephemeral environment + # these have iscsi root and cloud-config-url on the cmdline. + local maasiqn="iqn.2004-05.com.ubuntu:maas" + case "${DI_KERNEL_CMDLINE}" in + *cloud-config-url=*${maasiqn}*|*${maasiqn}*cloud-config-url=*) + return ${DS_FOUND} + ;; + esac + + # check config files written by maas for installed system. + local confd="${PATH_CLOUD_CONFD}" + local fnmatch="$confd/*maas*.cfg $confd/*kernel_cmdline*.cfg" + if check_config "MAAS" "$fnmatch"; then + return "${DS_FOUND}" + fi + return ${DS_NOT_FOUND} +} + +dscheck_NoCloud() { + local fslabel="cidata" d="" + for d in nocloud nocloud-net; do + check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} + done + if has_fs_with_label "${fslabel}"; then + return ${DS_FOUND} + fi + return ${DS_NOT_FOUND} +} + +check_configdrive_v2() { + if has_fs_with_label "config-2"; then + return ${DS_FOUND} + fi + return ${DS_NOT_FOUND} +} + +check_configdrive_v1() { + # FIXME: this has to check any file system that is vfat... + # for now, just return not found. + return ${DS_NOT_FOUND} +} + +dscheck_ConfigDrive() { + local ret="" + check_configdrive_v2 + ret=$? + [ $DS_FOUND -eq $ret ] && return $ret + + check_configdrive_v1 +} + +dscheck_DigitalOcean() { + dmi_sys_vendor_is DigitalOcean && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + +dscheck_OpenNebula() { + check_seed_dir opennebula && return ${DS_FOUND} + has_fs_with_label "CONTEXT" && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + +dscheck_OVF() { + local p="" + check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}" + + has_cdrom || return ${DS_NOT_FOUND} + + # FIXME: currently just return maybe if there is a cdrom + # ovf iso9660 transport does not specify an fs label. 
+ # better would be to check if + return ${DS_MAYBE} +} + +dscheck_Azure() { + # http://paste.ubuntu.com/23630873/ + # $ grep /sr0 /run/blkid/blkid.tab + # /dev/sr0 + # + check_seed_dir azure ovf-env.xml && return ${DS_FOUND} + + [ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND} + + has_fs_with_label "rd_rdfe_*" && return ${DS_FOUND} + + return ${DS_NOT_FOUND} +} + +dscheck_Bigstep() { + # bigstep is activated by presense of seed file 'url' + check_seed_dir "bigstep" url && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + +dscheck_Ec2() { + # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html + # http://paste.ubuntu.com/23630859/ + local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" + is_container && return ${DS_NOT_FOUND} + # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2' + if [ -r "$hvuuid" ] && read uuid < "$hvuuid" && + [ "${uuid#ec2}" != "$uuid" ]; then + return ${DS_FOUND} + fi + + # product uuid and product serial start with case insensitive + local uuid=${DI_DMI_PRODUCT_UUID} serial=${DI_DMI_PRODUCT_SERIAL} + case "$uuid:$serial" in + [Ee][Cc]2*:[Ee][Cc]2) + # both start with ec2, now check for case insenstive equal + nocase_equal "$uuid" "$serial" && return ${DS_FOUND};; + esac + + # search through config files to check for platform + local f="" match="${PATH_CLOUD_CONFD}/*ec2*.cfg" + # look for the key 'platform' (datasource/ec2/look_alike/behavior) + if check_config platform "$match"; then + if [ "$platform" != "Unknown" ]; then + _RET="$name" + return "${DS_FOUND}" + fi + fi + + return ${DS_NOT_FOUND} +} + +dscheck_GCE() { + if dmi_product_name_is "Google Compute Engine"; then + return ${DS_FOUND} + fi + return ${DS_NOT_FOUND} +} + +dscheck_OpenStack() { + # the openstack metadata http service + + # if there is a config drive, then do not check metadata + # FIXME: if config drive not in the search list, then we should not + # do this check. + check_configdrive_v2 + if [ $? -eq ${DS_FOUND} ]; then + return ${DS_NOT_FOUND} + fi + if dmi_product_name_is "OpenStack Nova"; then + return ${DS_FOUND} + fi + case "${DI_VIRT}" in + lxc|lxc-libvirt) + # FIXME: This could be container on openstack (nova-lxd) + # or nova-libvirt-lxc + return ${DS_NOT_FOUND} + ;; + esac + + return ${DS_NOT_FOUND} +} + +dscheck_AliYun() { + # aliyun is not enabled by default (LP: #1638931) + # so if we are here, it is because the datasource_list was + # set to include it. Thus, 'maybe'. 
+ return $DS_MAYBE +} + +dscheck_AltCloud() { + # ctype: either the dmi product name, or contents of + # /etc/sysconfig/cloud-info + # if ctype == "vsphere" + # device = device with label 'CDROM' + # elif ctype == "rhev" + # device = /dev/floppy + # then, filesystem on that device must have + # user-data.txt or deltacloud-user-data.txt + local ctype="" dev="" + local match_rhev="[Rr][Hh][Ee][Vv]" + local match_vsphere="[Vv][Ss][Pp][Hh][Ee][Rr][Ee]" + local cinfo="${PATH_ROOT}/etc/sysconfig/cloud-info" + if [ -f "$cinfo" ]; then + read ctype < "$cinfo" + else + ctype="${DI_DMI_PRODUCT_NAME}" + fi + case "$ctype" in + ${match_rhev}) + probe_floppy || return ${DS_NOT_FOUND} + dev="/dev/floppy" + ;; + ${match_vsphere}) + block_dev_with_label CDROM || return ${DS_NOT_FOUND} + dev="$_RET" + ;; + *) return ${DS_NOT_FOUND};; + esac + + # FIXME: need to check $dev for user-data.txt or deltacloud-user-data.txt + : "$dev" + return $DS_MAYBE +} + +dscheck_SmartOS() { + # joyent cloud has two virt types: kvm and container + # on kvm, product name on joyent public cloud shows 'SmartDC HVM' + # on the container platform, uname's version has: BrandZ virtual linux + local smartdc_kver="BrandZ virtual linux" + dmi_product_name_matches "SmartDC*" && return $DS_FOUND + if [ "${DI_UNAME_KERNEL_VERSION}" = "${smartdc_kver}" ] && + [ "${DI_VIRT}" = "container-other" ]; then + return ${DS_FOUND} + fi + return ${DS_NOT_FOUND} +} + +dscheck_None() { + return ${DS_NOT_FOUND} +} + +collect_info() { + read_virt + read_kernel_cmdline + read_uname_info + read_config + read_datasource_list + read_dmi_sys_vendor + read_dmi_product_name + read_dmi_product_serial + read_dmi_product_uuid + read_fs_labels +} + +print_info() { + collect_info + _print_info +} + +_print_info() { + local n="" v="" vars="" + vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" + vars="$vars DMI_PRODUCT_UUID" + vars="$vars FS_LABELS KERNEL_CMDLINE VIRT" + vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION" + vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" + vars="$vars DSNAME DSLIST" + vars="$vars MODE REPORT ON_FOUND ON_MAYBE ON_NOTFOUND" + for v in ${vars}; do + eval n='${DI_'"$v"'}' + echo "$v=$n" + done + echo "pid=$$ ppid=$PPID" + is_container && echo "is_container=true" || echo "is_container=false" +} + +write_result() { + local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" + if [ "$DI_REPORT" = "true" ]; then + runcfg="$runcfg.report" + fi + for line in "$@"; do + echo "$line" + done > "$runcfg" + ret=$? + [ $ret -eq 0 ] || { + error "failed to write to ${runcfg}" + return $ret + } + return 0 +} + +found() { + local list="" ds="" + # always we write the None datasource last. + for ds in "$@" None; do + list="${list:+${list}, }$ds" + done + write_result "datasource_list: [ $list ]" + return +} + +trim() { + set -- $* + _RET="$*" +} + +unquote() { + # remove quotes from quoted value + local quote='"' tick="'" + local val="$1" + case "$val" in + ${quote}*${quote}|${tick}*${tick}) + val=${val#?}; val=${val%?};; + esac + _RET="$val" +} + +_read_config() { + # reads config from stdin, modifies _rc scoped environment vars. + # rc_policy and _rc_dsname + local line="" hash="#" ckey="" key="" val="" + while read line; do + line=${line%%${hash}*} + key="${line%%:*}" + + # no : in the line. 
+ [ "$key" = "$line" ] && continue + trim "$key" + key=${_RET} + + val="${line#*:}" + trim "$val" + unquote "${_RET}" + val=${_RET} + case "$key" in + datasource) _rc_dsname="$val";; + policy) _rc_policy="$val";; + esac + done +} + +parse_warn() { + echo "WARN: invalid value '$2' for key '$1'. Using $1=$3." 1>&2 +} + +parse_def_policy() { + local _rc_mode="" _rc_report="" _rc_found="" _rc_maybe="" _rc_notfound="" + local ret="" + parse_policy "$@" + ret=$? + _def_mode=$_rc_mode + _def_report=$_rc_report + _def_found=$_rc_found + _def_maybe=$_rc_maybe + _def_notfound=$_rc_notfound + return $ret +} + +parse_policy() { + # parse_policy(policy, default) + # parse a policy string. sets + # _rc_mode (enable|disable,search) + # _rc_report true|false + # _rc_found first|all + # _rc_maybe all|none + # _rc_notfound enable|disable + local def="" + case "$DI_UNAME_MACHINE" in + # these have dmi data + i?86|x86_64|aarch64) def=${DI_DEFAULT_POLICY};; + *) def=${DI_DEFAULT_POLICY_NO_DMI};; + esac + local policy="$1" + local _def_mode="" _def_report="" _def_found="" _def_maybe="" + local _def_notfound="" + if [ $# -eq 1 ] || [ "$2" != "-" ]; then + def=${2:-${def}} + parse_def_policy "$def" - + fi + + local mode="" report="" found="" maybe="" notfound="" + local oifs="$IFS" tok="" val="" + IFS=","; set -- $policy; IFS="$oifs" + for tok in "$@"; do + val=${tok#*=} + case "$tok" in + report) report=true;; + $DI_ENABLED|$DI_DISABLED|search) mode=$tok;; + found=all|found=first) found=$val;; + maybe=all|maybe=none) maybe=$val;; + notfound=$DI_ENABLED|notfound=$DI_DISABLED) notfound=$val;; + found=*) + parse_warn found "$val" "${_def_found}" + found=${_def_found};; + maybe=*) + parse_warn maybe "$val" "${_def_maybe}" + maybe=${_def_maybe};; + notfound=*) + parse_warn notfound "$val" "${_def_notfound}" + notfound=${_def_notfound};; + esac + done + report=${report:-${_def_report:-false}} + _rc_report=${report} + _rc_mode=${mode:-${_def_mode}} + _rc_found=${found:-${_def_found}} + _rc_maybe=${maybe:-${_def_maybe}} + _rc_notfound=${notfound:-${_def_notfound}} +} + +read_config() { + local config=${PATH_DI_CONFIG} + local _rc_dsname="" _rc_policy="" ret="" + if [ -f "$config" ]; then + _read_config < "$config" + ret=$? + elif [ -e "$config" ]; then + error "$config exists but is not a file!" + ret=1 + fi + local tok="" key="" val="" + for tok in ${DI_KERNEL_CMDLINE}; do + key=${tok%%=*} + val=${tok#*=} + case "$key" in + ci.ds) _rc_dsname="$val";; + ci.datasource) _rc_dsname="$val";; + ci.di.policy) _rc_policy="$val";; + esac + done + + local _rc_mode _rc_report _rc_found _rc_maybe _rc_notfound + parse_policy "${_rc_policy}" + debug 1 "policy loaded: mode=${_rc_mode} report=${_rc_report}" \ + "found=${_rc_found} maybe=${_rc_maybe} notfound=${_rc_notfound}" + DI_MODE=${_rc_mode} + DI_REPORT=${_rc_report} + DI_ON_FOUND=${_rc_found} + DI_ON_MAYBE=${_rc_maybe} + DI_ON_NOTFOUND=${_rc_notfound} + + DI_DSNAME="${_rc_dsname}" + return $ret +} + + +manual_clean_and_existing() { + [ -f "${PATH_VAR_LIB_CLOUD}/instance/manual-clean" ] +} + +main() { + local dscheck="" ret_dis=1 ret_en=0 + collect_info + + if [ ! -e "$PATH_RUN_CI_CFG" ]; then + # the first time the generator is run. + _print_info >> "$DI_LOG" + fi + + case "$DI_MODE" in + $DI_DISABLED) + debug 1 "mode=$DI_DISABLED. returning $ret_dis" + return $ret_dis + ;; + $DI_ENABLED) + debug 1 "mode=$DI_ENABLED. returning $ret_en" + return $ret_en;; + search) :;; + esac + + if [ -n "${DI_DSNAME}" ]; then + debug 1 "datasource '$DI_DSNAME' specified." 
+ found "$DI_DSNAME" + return + fi + + if manual_clean_and_existing; then + debug 1 "manual_cache_clean enabled. Not writing datasource_list." + write_result "# manual_cache_clean." + return + fi + + # if there is only a single entry in $DI_DSLIST + set -- $DI_DSLIST + if [ $# -eq 1 ] || [ $# -eq 2 -a "$2" = "None" ] ; then + debug 1 "single entry in datasource_list ($DI_DSLIST) use that." + found "$@" + return + fi + + local found="" ret="" ds="" maybe="" + for ds in ${DI_DSLIST}; do + dscheck_fn="dscheck_${ds}" + debug 2 "Checking for datasource '$ds' via '$dscheck_fn'" + if ! type "$dscheck_fn" >/dev/null 2>&1; then + warn "No check method '$dscheck_fn' for datasource '$ds'" + continue + fi + $dscheck_fn + ret="$?" + case "$ret" in + $DS_FOUND) + debug 1 "check for '$ds' returned found"; + found="${found} $ds";; + $DS_MAYBE) + debug 1 "check for $ds returned maybe"; + maybe="${maybe} $ds";; + *) debug 2 "check for $ds returned not-found[$ret]";; + esac + done + + debug 2 "found=$found maybe=$maybe" + set -- $found + if [ $# -ne 0 ]; then + if [ $# -eq 1 ]; then + debug 1 "Found single datasource: $1" + else + # found=all + debug 1 "Found $# datasources found=${DI_ON_FOUND}: $*" + if [ "${DI_ON_FOUND}" = "first" ]; then + set -- "$1" + fi + fi + found "$@" + return + fi + + set -- $maybe + if [ $# -ne 0 -a "${DI_ON_MAYBE}" != "none" ]; then + debug 1 "$# datasources returned maybe: $*" + found "$@" + return + fi + + case "$DI_ON_NOTFOUND" in + $DI_DISABLED) + debug 1 "No result. notfound=$DI_DISABLED. returning $ret_dis." + return $ret_dis + ;; + $DI_ENABLED) + debug 1 "notfound=$DI_ENABLED. returning $ret_en" + return $ret_en;; + esac + + error "Unexpected result" + return 3 +} + +noop() { + : +} + +case "${DI_MAIN}" in + main|print_info|noop) "${DI_MAIN}" "$@";; + *) error "unexpected value for DI_MAIN"; exit 1;; +esac + +# vi: syntax=sh ts=4 expandtab -- cgit v1.2.3 From 5f14a0b1ca3079e4ab43d615840866a4b7d8df6a Mon Sep 17 00:00:00 2001 From: Joshua Powers Date: Mon, 30 Jan 2017 15:31:36 -0700 Subject: code-style: make master pass pycodestyle (2.3.1) cleanly, currently: $ pycodestyle cloudinit/ tests/ tools/ tools/make-mime.py:25:5: E722 do not use bare except' tools/mock-meta.py:252:17: E722 do not use bare except' For tools/make-mime.py:25:5: E722 do not use bare except' the use case is when someone runs ./make-mime.py --attach commis instead of ./make-mime.py --attach commissaire.txt:x-commissaire-host The split can cause a ValueError potentially if there is no: For tools/mock-meta.py:262:17: E722 do not use bare except' the use case is a dictionary look up occurs potentially when an unknown key is given: key_name = key_ids[key_id] Do note that version 2.3.0 falsely reported a dozen or so E302 and E306 errors. 
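The general shape of the fix E722 asks for is to name the exceptions the code can actually raise rather than using a bare except. A small standalone illustration of the two cases described above (the names here are made up, not the ones in make-mime.py or mock-meta.py):

    def split_attachment(text):
        # "file.txt:content-type" -> (filename, content_type)
        try:
            filename, content_type = text.split(":", 1)
        except ValueError:  # no ':' in the argument, so unpacking fails
            raise ValueError("Invalid value %r, expected 'file:content-type'" % text)
        return filename, content_type.strip()

    def lookup_key(key_ids, mybe_key):
        try:
            key_id = int(mybe_key)
            return key_ids[key_id]
        except ValueError:  # mybe_key was not an integer
            raise KeyError("%s: not an integer" % mybe_key)
        except (KeyError, IndexError):  # id not present in the mapping
            raise KeyError("Unknown key id %r" % mybe_key)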
--- tools/make-mime.py | 2 +- tools/mock-meta.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/make-mime.py b/tools/make-mime.py index 12727126..f6a72044 100755 --- a/tools/make-mime.py +++ b/tools/make-mime.py @@ -22,7 +22,7 @@ def file_content_type(text): try: filename, content_type = text.split(":", 1) return (open(filename, 'r'), filename, content_type.strip()) - except: + except ValueError: raise argparse.ArgumentError("Invalid value for %r" % (text)) diff --git a/tools/mock-meta.py b/tools/mock-meta.py index a0d99441..95fc4659 100755 --- a/tools/mock-meta.py +++ b/tools/mock-meta.py @@ -259,7 +259,10 @@ class MetaDataHandler(object): try: key_id = int(mybe_key) key_name = key_ids[key_id] - except: + except ValueError: + raise WebException(hclient.BAD_REQUEST, + "%s: not an integer" % mybe_key) + except KeyError: raise WebException(hclient.BAD_REQUEST, "Unknown key id %r" % mybe_key) # Extract the possible sub-params -- cgit v1.2.3 From 7f85a3a5b4586ac7f21309aac4edc39e6ffea9ef Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 9 Feb 2017 12:25:54 -0500 Subject: ds-identify: change aarch64 to use the default for non-dmi systems. aarch64 does support dmi, but OpenStack does not populate guests with this information, and there are currently bugs in qemu preventing it from working correctly see bug #1663304 for more information. So, for the time being, pretend as if there is no dmi data on aarch64, which will make it enable cloud-init even when no datasources are found. --- tools/ds-identify | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index 203eac0d..f07866a2 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -824,7 +824,9 @@ parse_policy() { local def="" case "$DI_UNAME_MACHINE" in # these have dmi data - i?86|x86_64|aarch64) def=${DI_DEFAULT_POLICY};; + i?86|x86_64) def=${DI_DEFAULT_POLICY};; + # aarch64 has dmi, but not currently used (LP: #1663304) + aarch64) def=${DI_DEFAULT_POLICY_NO_DMI};; *) def=${DI_DEFAULT_POLICY_NO_DMI};; esac local policy="$1" -- cgit v1.2.3 From 0df21b6ea89697e8700ad51158327533aa573c91 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 9 Feb 2017 10:56:01 -0500 Subject: support nova-lxd by reading platform from environment of pid 1. Nova lxd will now put the environment variable 'platform' into pid 1's environment to the value 'OpenStack Nova', which is the same as you would find in kvm guests. 
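/proc/1/environ is a NUL-separated list of KEY=VALUE pairs, so detecting this amounts to splitting on '\0' and looking for a 'platform' entry. The patch below implements that in shell; a rough Python equivalent, in which only the path and the key name come from the patch, would be:

    def read_pid1_platform(path="/proc/1/environ", default="unavailable"):
        # Return the value of 'platform' from pid 1's environment, if any.
        try:
            with open(path, "rb") as fp:
                raw = fp.read()
        except (IOError, OSError):
            return default
        for tok in raw.split(b"\0"):
            key, _, val = tok.partition(b"=")
            if key == b"platform":
                return val.decode("utf-8", "replace")
        return default

With nova-lxd setting platform=OpenStack Nova, a return value of "OpenStack Nova" is what lets the OpenStack check succeed inside the container.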
LP: #1661797 --- tools/ds-identify | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index f07866a2..88094af7 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -62,6 +62,7 @@ PATH_VAR_LIB_CLOUD="${PATH_VAR_LIB_CLOUD:-${PATH_ROOT}/var/lib/cloud}" PATH_DI_CONFIG="${PATH_DI_CONFIG:-${PATH_ROOT}/etc/cloud/ds-identify.cfg}" PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}" PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}" +PATH_PROC_1_ENVIRON="${PATH_PROC_1_ENVIRON:-${PATH_ROOT}/proc/1/environ}" PATH_CLOUD_CONFD="${PATH_CLOUD_CONFD:-${PATH_ROOT}/etc/cloud}" PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}" PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg} @@ -81,6 +82,7 @@ DI_DMI_PRODUCT_UUID="" DI_FS_LABELS="" DI_KERNEL_CMDLINE="" DI_VIRT="" +DI_PID_1_PLATFORM="" DI_UNAME_KERNEL_NAME="" DI_UNAME_KERNEL_RELEASE="" @@ -350,6 +352,21 @@ read_datasource_list() { return 0 } +read_pid1_platform() { + local oifs="$IFS" out="" tok="" key="" val="" platform="${UNAVAILABLE}" + cached "${DI_PID_1_PLATFORM}" && return + [ -r "${PATH_PROC_1_ENVIRON}" ] || return + out=$(tr '\0' '\n' <"${PATH_PROC_1_ENVIRON}") + IFS="$CR"; set -- $out; IFS="$oifs" + for tok in "$@"; do + key=${tok%%=*} + [ "$key" != "$tok" ] || continue + val=${tok#*=} + [ "$key" = "platform" ] && platform="$val" && break + done + DI_PID_1_PLATFORM="$platform" +} + dmi_product_name_matches() { is_container && return 1 case "${DI_DMI_PRODUCT_NAME}" in @@ -625,13 +642,9 @@ dscheck_OpenStack() { if dmi_product_name_is "OpenStack Nova"; then return ${DS_FOUND} fi - case "${DI_VIRT}" in - lxc|lxc-libvirt) - # FIXME: This could be container on openstack (nova-lxd) - # or nova-libvirt-lxc - return ${DS_NOT_FOUND} - ;; - esac + if [ "${DI_PID_1_PLATFORM}" = "OpenStack Nova" ]; then + return ${DS_FOUND} + fi return ${DS_NOT_FOUND} } @@ -697,6 +710,7 @@ dscheck_None() { collect_info() { read_virt + read_pid1_platform read_kernel_cmdline read_uname_info read_config @@ -716,7 +730,7 @@ print_info() { _print_info() { local n="" v="" vars="" vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" - vars="$vars DMI_PRODUCT_UUID" + vars="$vars DMI_PRODUCT_UUID PID_1_PLATFORM" vars="$vars FS_LABELS KERNEL_CMDLINE VIRT" vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION" vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" -- cgit v1.2.3 From e6098c2cd0a1786ba5b34b603247b4ef644e2312 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 10 Feb 2017 13:44:32 -0600 Subject: ds-identify: read ds=nocloud properly The nocloud datasource specifically would look for ds=nocloud or ds=nocloud-net (often augmented with 'seedfrom') on the kernel command line. Fix to return DS_FOUND in that case. 
LP: #1663723 --- tools/ds-identify | 3 +++ 1 file changed, 3 insertions(+) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index 88094af7..e454ed6d 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -514,6 +514,9 @@ dscheck_MAAS() { dscheck_NoCloud() { local fslabel="cidata" d="" + case " ${DI_KERNEL_CMDLINE} " in + *\ ds=nocloud*) return ${DS_FOUND};; + esac for d in nocloud nocloud-net; do check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done -- cgit v1.2.3 From 65529b6fca5915438612c161c01fe7b57c2a59b1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 10 Feb 2017 14:14:21 -0600 Subject: ds-identify: fix checking for filesystem label has_fs_with_label regressed when refactoring to not have leading and trailing , in DI_FS_LABELS. LP: #1663735 --- tools/ds-identify | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index e454ed6d..3ba36f8f 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -386,8 +386,8 @@ dmi_sys_vendor_is() { } has_fs_with_label() { - local label=",$1," - case "${DI_FS_LABELS}" in + local label="$1" + case ",${DI_FS_LABELS}," in *,$label,*) return 0;; esac return 1 -- cgit v1.2.3 From f4e8eb0a18b775e341823cfa1a7b305af753d548 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Feb 2017 15:25:24 -0500 Subject: ds-identify: only run once per boot unless --force is given. This makes ds-identify run only once. Previously it would run multiple times each boot as the generator would run more than once. This is potentially dangerous, in that running again might find more attached disks. However that is really only a "lucky" fix if it happens to result differently than the first run. Additionally, we now log the uptime that we started and ended at. --- tools/ds-identify | 45 ++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 42 insertions(+), 3 deletions(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index 3ba36f8f..7bb63862 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -63,9 +63,11 @@ PATH_DI_CONFIG="${PATH_DI_CONFIG:-${PATH_ROOT}/etc/cloud/ds-identify.cfg}" PATH_PROC_CMDLINE="${PATH_PROC_CMDLINE:-${PATH_ROOT}/proc/cmdline}" PATH_PROC_1_CMDLINE="${PATH_PROC_1_CMDLINE:-${PATH_ROOT}/proc/1/cmdline}" PATH_PROC_1_ENVIRON="${PATH_PROC_1_ENVIRON:-${PATH_ROOT}/proc/1/environ}" +PATH_PROC_UPTIME=${PATH_PROC_UPTIME:-${PATH_ROOT}/proc/uptime} PATH_CLOUD_CONFD="${PATH_CLOUD_CONFD:-${PATH_ROOT}/etc/cloud}" PATH_RUN_CI="${PATH_RUN_CI:-${PATH_RUN}/cloud-init}" PATH_RUN_CI_CFG=${PATH_RUN_CI_CFG:-${PATH_RUN_CI}/cloud.cfg} +PATH_RUN_DI_RESULT=${PATH_RUN_DI_RESULT:-${PATH_RUN_CI}/.ds-identify.result} DI_LOG="${DI_LOG:-${PATH_RUN_CI}/ds-identify.log}" _DI_LOGGED="" @@ -750,6 +752,8 @@ _print_info() { write_result() { local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" if [ "$DI_REPORT" = "true" ]; then + # if report is true, then we write to .report, but touch the other. + : > "$runcfg" runcfg="$runcfg.report" fi for line in "$@"; do @@ -924,12 +928,24 @@ manual_clean_and_existing() { [ -f "${PATH_VAR_LIB_CLOUD}/instance/manual-clean" ] } -main() { +read_uptime() { + local up idle + _RET="${UNAVAILABLE}" + [ -f "$PATH_PROC_UPTIME" ] && + read up idle < "$PATH_PROC_UPTIME" && _RET="$up" + return +} + +_main() { local dscheck="" ret_dis=1 ret_en=0 + + read_uptime + debug 1 "[up ${_RET}s]" "ds-identify $*" collect_info - if [ ! -e "$PATH_RUN_CI_CFG" ]; then - # the first time the generator is run. 
+ if [ "$DI_LOG" = "stderr" ]; then + _print_info 1>&2 + else _print_info >> "$DI_LOG" fi @@ -1022,6 +1038,29 @@ main() { return 3 } +main() { + local ret="" + [ -d "$PATH_RUN_CI" ] || mkdir -p "$PATH_RUN_CI" + if [ "${1:+$1}" != "--force" ] && [ -f "$PATH_RUN_CI_CFG" ] && + [ -f "$PATH_RUN_DI_RESULT" ]; then + if read ret < "$PATH_RUN_DI_RESULT"; then + if [ "$ret" = "0" ] || [ "$ret" = "1" ]; then + debug 2 "used cached result $ret. pass --force to re-run." + return $ret; + fi + debug 1 "previous run returned unexpected '$ret'. Re-running." + else + error "failed to read result from $PATH_RUN_DI_RESULT!" + fi + fi + _main "$@" + ret=$? + echo "$ret" > "$PATH_RUN_DI_RESULT" + read_uptime + debug 1 "[up ${_RET}s]" "returning $ret" + return $ret +} + noop() { : } -- cgit v1.2.3 From 5551e8fc40ba37f0bd133f9478a8db8ce9f79dd7 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Feb 2017 14:22:45 -0500 Subject: tools/ds-identify: fix documentation of policy setting in a comment. Just remove some examples that are no longer valid. --- tools/ds-identify | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index 7bb63862..9b14b92a 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -29,17 +29,11 @@ # all: enable all DS_MAYBE # none: ignore any DS_MAYBE # -# notfound: (default=disable) -# disable: disable cloud-init -# enable: enable cloud-init +# notfound: (default=disabled) +# disabled: disable cloud-init +# enabled: enable cloud-init # # -# zesty: -# policy: found=first,maybe=all,none=disable -# xenial: -# policy: found=all,maybe=all,none=enable -# and then at a later date - set -u set -f -- cgit v1.2.3 From cff1335be979fd1be5512d241ab861cfe70d82f0 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Feb 2017 22:42:50 -0500 Subject: tools/ds-identify: use quotes in local declarations. The following can have cause issue: FOO="bar ; wark" showit() { local b=$FOO echo $b } 4: local: ;: bad variable name The answer is just to use more quotes. --- tools/ds-identify | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index 9b14b92a..f2878745 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -601,7 +601,7 @@ dscheck_Ec2() { fi # product uuid and product serial start with case insensitive - local uuid=${DI_DMI_PRODUCT_UUID} serial=${DI_DMI_PRODUCT_SERIAL} + local uuid="${DI_DMI_PRODUCT_UUID}" serial="${DI_DMI_PRODUCT_SERIAL}" case "$uuid:$serial" in [Ee][Cc]2*:[Ee][Cc]2) # both start with ec2, now check for case insenstive equal @@ -883,7 +883,7 @@ parse_policy() { } read_config() { - local config=${PATH_DI_CONFIG} + local config="${PATH_DI_CONFIG}" local _rc_dsname="" _rc_policy="" ret="" if [ -f "$config" ]; then _read_config < "$config" -- cgit v1.2.3 From e0efe853b805ca3c66155b7307a67af5175b3f46 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Feb 2017 17:13:52 -0500 Subject: tools/ds-identify: read the seed directory on Ec2 This just adds checking of the Ec2 seed directory. 
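The seed-directory convention used throughout ds-identify is a directory /var/lib/cloud/seed/<name> containing at least the files the check names, for Ec2 both meta-data and user-data. A short Python sketch of the same logic (the path layout matches the script's defaults; the function itself is only illustrative):

    import os

    def check_seed_dir(name, required=("meta-data",), root="/var/lib/cloud"):
        # True if the seed dir for 'name' exists and holds every required file.
        seed = os.path.join(root, "seed", name)
        if not os.path.isdir(seed):
            return False
        return all(os.path.isfile(os.path.join(seed, f)) for f in required)

    # The Ec2 check added here corresponds to:
    #   check_seed_dir("ec2", required=("meta-data", "user-data"))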
--- tools/ds-identify | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index f2878745..c15ba5c0 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -592,8 +592,11 @@ dscheck_Bigstep() { dscheck_Ec2() { # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html # http://paste.ubuntu.com/23630859/ - local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" + + check_seed_dir "ec2" meta-data user-data && return ${DS_FOUND} is_container && return ${DS_NOT_FOUND} + + local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2' if [ -r "$hvuuid" ] && read uuid < "$hvuuid" && [ "${uuid#ec2}" != "$uuid" ]; then -- cgit v1.2.3 From 56f66872923e653ba64c9f9baa0ad7a23a9da0c1 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Feb 2017 11:37:31 -0500 Subject: tools/ds-identify: add support for found or maybe contributing config. A check function that returns found or maybe can also now return config that will be written to the resultant /run/cloud.cfg. They do so by setting the variable _RET_excfg. --- tools/ds-identify | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index c15ba5c0..1cd1118f 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -765,12 +765,22 @@ write_result() { } found() { + # found(ds1, [ds2 ...], [-- [extra lines]]) local list="" ds="" # always we write the None datasource last. - for ds in "$@" None; do - list="${list:+${list}, }$ds" + while [ $# -ne 0 ]; do + if [ "$1" = "--" ]; then + shift + break + fi + list="${list:+${list}, }$1" + shift done - write_result "datasource_list: [ $list ]" + if [ $# -eq 1 ] && [ -z "$1" ]; then + # do not pass an empty line through. + shift + fi + write_result "datasource_list: [ $list ]" "$@" return } @@ -977,7 +987,8 @@ _main() { return fi - local found="" ret="" ds="" maybe="" + local found="" ret="" ds="" maybe="" _RET_excfg="" + local exfound_cfg="" exmaybe_cfg="" for ds in ${DI_DSLIST}; do dscheck_fn="dscheck_${ds}" debug 2 "Checking for datasource '$ds' via '$dscheck_fn'" @@ -985,20 +996,23 @@ _main() { warn "No check method '$dscheck_fn' for datasource '$ds'" continue fi + _RET_excfg="" $dscheck_fn ret="$?" 
case "$ret" in $DS_FOUND) debug 1 "check for '$ds' returned found"; + exfound_cfg="${exfound_cfg:+${exfound_cfg}${CR}}${_RET_excfg}" found="${found} $ds";; $DS_MAYBE) - debug 1 "check for $ds returned maybe"; + debug 1 "check for '$ds' returned maybe"; + exmaybe_cfg="${exmaybe_cfg:+${exmaybe_cfg}${CR}}${_RET_excfg}" maybe="${maybe} $ds";; - *) debug 2 "check for $ds returned not-found[$ret]";; + *) debug 2 "check for '$ds' returned not-found[$ret]";; esac done - debug 2 "found=$found maybe=$maybe" + debug 2 "found=${found# } maybe=${maybe# }" set -- $found if [ $# -ne 0 ]; then if [ $# -eq 1 ]; then @@ -1010,14 +1024,14 @@ _main() { set -- "$1" fi fi - found "$@" + found "$@" -- "${exfound_cfg}" return fi set -- $maybe if [ $# -ne 0 -a "${DI_ON_MAYBE}" != "none" ]; then debug 1 "$# datasources returned maybe: $*" - found "$@" + found "$@" -- "${exmaybe_cfg}" return fi -- cgit v1.2.3 From 131b6f16a314d863e142d5f59c8488b59e28fa97 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 22 Feb 2017 13:35:51 -0500 Subject: ds-identify: add reading of datasource/Ec2/strict_id ds-identify will now read this setting, and thus allow the user to modify ds-identifies behavior via either: 1. builtin setting here cloud-init/ds-identify builtin 2. ds-identify config (/etc/cloud/ds-identify.cfg) 3. system config (/etc/cloud/cloud.cfg.d/*Ec2*.cfg) 4. kernel command line (ci.datasource.ec2.strict_id=true) --- tools/ds-identify | 98 +++++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 85 insertions(+), 13 deletions(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index 1cd1118f..bfb55ed1 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -4,12 +4,12 @@ # or on the kernel command line. It takes primarily 2 inputs: # datasource: can specify the datasource that should be used. # kernel command line option: ci.datasource= -# +# # policy: a string that indicates how ds-identify should operate. # kernel command line option: ci.di.policy= # default setting is: # search,found=all,maybe=all,notfound=disable - +# # report: write config to /run/cloud-init/cloud.cfg.report (instead of # /run/cloud-init/cloud.cfg, which effectively makes this dry-run). # enable: do nothing @@ -33,6 +33,10 @@ # disabled: disable cloud-init # enabled: enable cloud-init # +# ci.datasource.ec2.strict_id: (true|false|warn[,0-9]) +# if ec2 datasource does not strictly match, +# return not_found if true +# return maybe if false or warn*. # set -u @@ -589,6 +593,48 @@ dscheck_Bigstep() { return ${DS_NOT_FOUND} } +ec2_read_strict_setting() { + # the 'strict_id' setting for Ec2 controls behavior when + # the platform does not identify itself directly as Ec2. + # order of precedence is: + # 1. builtin setting here cloud-init/ds-identify builtin + # 2. ds-identify config + # 3. system config (/etc/cloud/cloud.cfg.d/*Ec2*.cfg) + # 4. kernel command line (undocumented) + # 5. user-data or vendor-data (not available here) + local default="$1" key="ci.datasource.ec2.strict_id" val="" + + # 4. kernel command line + case " ${DI_KERNEL_CMDLINE} " in + *\ $key=*\ ) + val=${DI_KERNEL_CMDLINE##*$key=} + val=${val%% *}; + _RET=${val:-$default} + return 0 + esac + + # 3. look for the key 'strict_id' (datasource/Ec2/strict_id) + local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" + match="$bp.d/*[Ee][Cc]2*.cfg" + if check_config strict_id "$match"; then + debug 2 "${_RET_fname} set strict_id to $_RET" + return 0 + fi + + # 2. 
ds-identify config (datasource.ec2.strict) + local config="${PATH_DI_CONFIG}" + if [ -f "$config" ]; then + if _read_config "$key" < "$config"; then + _RET=${_RET:-$default} + return 0 + fi + fi + + # 1. Default + _RET=$default + return 0 +} + dscheck_Ec2() { # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html # http://paste.ubuntu.com/23630859/ @@ -611,17 +657,28 @@ dscheck_Ec2() { nocase_equal "$uuid" "$serial" && return ${DS_FOUND};; esac - # search through config files to check for platform - local f="" match="${PATH_CLOUD_CONFD}/*ec2*.cfg" - # look for the key 'platform' (datasource/ec2/look_alike/behavior) - if check_config platform "$match"; then - if [ "$platform" != "Unknown" ]; then - _RET="$name" - return "${DS_FOUND}" - fi + local default="true" + if ec2_read_strict_setting "$default"; then + strict="$_RET" + else + debug 1 "ec2_read_strict returned non-zero: $?. using '$default'." + strict="$default" fi - return ${DS_NOT_FOUND} + local key="datasource/Ec2/strict_id" + case "$strict" in + true|false|warn|warn,[0-9]*) :;; + *) + warn "$key was set to invalid '$strict'. using '$default'" + strict="$default";; + esac + + _RET_excfg="datasource: {Ec2: {strict_id: \"$strict\"}}" + if [ "$strict" = "true" ]; then + return $DS_NOT_FOUND + else + return $DS_MAYBE + fi } dscheck_GCE() { @@ -801,8 +858,10 @@ unquote() { } _read_config() { - # reads config from stdin, modifies _rc scoped environment vars. - # rc_policy and _rc_dsname + # reads config from stdin, + # if no parameters are set, modifies _rc scoped environment vars. + # if keyname is provided, then returns found value of that key. + local keyname="${1:-_unset}" local line="" hash="#" ckey="" key="" val="" while read line; do line=${line%%${hash}*} @@ -813,15 +872,28 @@ _read_config() { trim "$key" key=${_RET} + [ "$keyname" != "_unset" ] && [ "$keyname" != "$key" ] && + continue + val="${line#*:}" trim "$val" unquote "${_RET}" val=${_RET} + + if [ "$keyname" = "$key" ]; then + _RET="$val" + return 0 + fi + case "$key" in datasource) _rc_dsname="$val";; policy) _rc_policy="$val";; esac done + if [ "$keyname" = "_unset" ]; then + return 1 + fi + return 0 } parse_warn() { -- cgit v1.2.3 From 9bb55c6c45bcc5e310cf7e4d42cad53759dcca15 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 23 Feb 2017 17:15:27 -0500 Subject: DatasourceEc2: add warning message when not on AWS. Based on the setting Datasource/Ec2/strict_id, the datasource will now warn once per instance. --- cloudinit/sources/DataSourceAliYun.py | 4 + cloudinit/sources/DataSourceEc2.py | 178 +++++++++++++++++++++++++++++++++- tools/ds-identify | 40 ++++++-- 3 files changed, 211 insertions(+), 11 deletions(-) (limited to 'tools') diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 2d00255c..9debe947 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -22,6 +22,10 @@ class DataSourceAliYun(EC2.DataSourceEc2): def get_public_ssh_keys(self): return parse_public_keys(self.metadata.get('public-keys', {})) + @property + def cloud_platform(self): + return EC2.Platforms.ALIYUN + def parse_public_keys(public_keys): keys = [] diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index c657fd09..26da263a 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -9,6 +9,7 @@ # This file is part of cloud-init. See LICENSE file for license information. 
import os +import textwrap import time from cloudinit import ec2_utils as ec2 @@ -22,12 +23,23 @@ LOG = logging.getLogger(__name__) # Which version we are requesting of the ec2 metadata apis DEF_MD_VERSION = '2009-04-04' +STRICT_ID_PATH = ("datasource", "Ec2", "strict_id") +STRICT_ID_DEFAULT = "warn" + + +class Platforms(object): + ALIYUN = "AliYun" + AWS = "AWS" + SEEDED = "Seeded" + UNKNOWN = "Unknown" + class DataSourceEc2(sources.DataSource): # Default metadata urls that will be used if none are provided # They will be checked for 'resolveability' and some of the # following may be discarded if they do not resolve metadata_urls = ["http://169.254.169.254", "http://instance-data.:8773"] + _cloud_platform = None def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) @@ -41,8 +53,18 @@ class DataSourceEc2(sources.DataSource): self.userdata_raw = seed_ret['user-data'] self.metadata = seed_ret['meta-data'] LOG.debug("Using seeded ec2 data from %s", self.seed_dir) + self._cloud_platform = Platforms.SEEDED return True + strict_mode, _sleep = read_strict_mode( + util.get_cfg_by_path(self.sys_cfg, STRICT_ID_PATH, + STRICT_ID_DEFAULT), ("warn", None)) + + LOG.debug("strict_mode: %s, cloud_platform=%s", + strict_mode, self.cloud_platform) + if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN: + return False + try: if not self.wait_for_metadata_service(): return False @@ -51,8 +73,8 @@ class DataSourceEc2(sources.DataSource): ec2.get_instance_userdata(self.api_ver, self.metadata_address) self.metadata = ec2.get_instance_metadata(self.api_ver, self.metadata_address) - LOG.debug("Crawl of metadata service took %s seconds", - int(time.time() - start_time)) + LOG.debug("Crawl of metadata service took %.3f seconds", + time.time() - start_time) return True except Exception: util.logexc(LOG, "Failed reading from metadata address %s", @@ -190,6 +212,158 @@ class DataSourceEc2(sources.DataSource): return az[:-1] return None + @property + def cloud_platform(self): + if self._cloud_platform is None: + self._cloud_platform = identify_platform() + return self._cloud_platform + + def activate(self, cfg, is_new_instance): + if not is_new_instance: + return + if self.cloud_platform == Platforms.UNKNOWN: + warn_if_necessary( + util.get_cfg_by_path(cfg, STRICT_ID_PATH, STRICT_ID_DEFAULT)) + + +def read_strict_mode(cfgval, default): + try: + return parse_strict_mode(cfgval) + except ValueError as e: + LOG.warn(e) + return default + + +def parse_strict_mode(cfgval): + # given a mode like: + # true, false, warn,[sleep] + # return tuple with string mode (true|false|warn) and sleep. + if cfgval is True: + return 'true', None + if cfgval is False: + return 'false', None + + if not cfgval: + return 'warn', 0 + + mode, _, sleep = cfgval.partition(",") + if mode not in ('true', 'false', 'warn'): + raise ValueError( + "Invalid mode '%s' in strict_id setting '%s': " + "Expected one of 'true', 'false', 'warn'." 
% (mode, cfgval)) + + if sleep: + try: + sleep = int(sleep) + except ValueError: + raise ValueError("Invalid sleep '%s' in strict_id setting '%s': " + "not an integer" % (sleep, cfgval)) + else: + sleep = None + + return mode, sleep + + +def warn_if_necessary(cfgval): + try: + mode, sleep = parse_strict_mode(cfgval) + except ValueError as e: + LOG.warn(e) + return + + if mode == "false": + return + + show_warning(sleep) + + +def show_warning(sleep): + message = textwrap.dedent(""" + **************************************************************** + # This system is using the EC2 Metadata Service, but does not # + # appear to be running on Amazon EC2 or one of cloud-init's # + # known platforms that provide a EC2 Metadata service. In the # + # future, cloud-init may stop reading metadata from the EC2 # + # Metadata Service unless the platform can be identified # + # # + # If you are seeing this message, please file a bug against # + # cloud-init at https://bugs.launchpad.net/cloud-init/+filebug # + # Make sure to include the cloud provider your instance is # + # running on. # + # # + # For more information see # + # https://bugs.launchpad.net/cloud-init/+bug/1660385 # + # # + # After you have filed a bug, you can disable this warning by # + # launching your instance with the cloud-config below, or # + # putting that content into # + # /etc/cloud/cloud.cfg.d/99-ec2-datasource.cfg # + # # + # #cloud-config # + # datasource: # + # Ec2: # + # strict_id: false # + # # + """) + closemsg = "" + if sleep: + closemsg = " [sleeping for %d seconds] " % sleep + message += closemsg.center(64, "*") + print(message) + LOG.warn(message) + if sleep: + time.sleep(sleep) + + +def identify_aws(data): + # data is a dictionary returned by _collect_platform_data. + if (data['uuid'].startswith('ec2') and + (data['uuid_source'] == 'hypervisor' or + data['uuid'] == data['serial'])): + return Platforms.AWS + + return None + + +def identify_platform(): + # identify the platform and return an entry in Platforms. 
+ data = _collect_platform_data() + checks = (identify_aws, lambda x: Platforms.UNKNOWN) + for checker in checks: + try: + result = checker(data) + if result: + return result + except Exception as e: + LOG.warn("calling %s with %s raised exception: %s", + checker, data, e) + + +def _collect_platform_data(): + # returns a dictionary with all lower case values: + # uuid: system-uuid from dmi or /sys/hypervisor + # uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' + # serial: dmi 'system-serial-number' (/sys/.../product_serial) + data = {} + try: + uuid = util.load_file("/sys/hypervisor/uuid").strip() + data['uuid_source'] = 'hypervisor' + except Exception: + uuid = util.read_dmi_data('system-uuid') + data['uuid_source'] = 'dmi' + + if uuid is None: + uuid = '' + data['uuid'] = uuid.lower() + + serial = util.read_dmi_data('system-serial-number') + if serial is None: + serial = '' + + data['serial'] = serial.lower() + + return data + # Used to match classes to dependencies datasources = [ diff --git a/tools/ds-identify b/tools/ds-identify index bfb55ed1..dfa856ff 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -635,28 +635,50 @@ ec2_read_strict_setting() { return 0 } -dscheck_Ec2() { - # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/identify_ec2_instances.html - # http://paste.ubuntu.com/23630859/ - - check_seed_dir "ec2" meta-data user-data && return ${DS_FOUND} - is_container && return ${DS_NOT_FOUND} +ec2_identify_platform() { + local default="$1" + local serial="${DI_DMI_PRODUCT_SERIAL}" + # AWS http://docs.aws.amazon.com/AWSEC2/ + # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" # if the (basically) xen specific /sys/hypervisor/uuid starts with 'ec2' if [ -r "$hvuuid" ] && read uuid < "$hvuuid" && [ "${uuid#ec2}" != "$uuid" ]; then - return ${DS_FOUND} + _RET="AWS" + return 0 fi # product uuid and product serial start with case insensitive - local uuid="${DI_DMI_PRODUCT_UUID}" serial="${DI_DMI_PRODUCT_SERIAL}" + local uuid="${DI_DMI_PRODUCT_UUID}" case "$uuid:$serial" in [Ee][Cc]2*:[Ee][Cc]2) # both start with ec2, now check for case insenstive equal - nocase_equal "$uuid" "$serial" && return ${DS_FOUND};; + nocase_equal "$uuid" "$serial" && + { _RET="AWS"; return 0; };; esac + _RET="$default" + return 0; +} + +dscheck_Ec2() { + check_seed_dir "ec2" meta-data user-data && return ${DS_FOUND} + is_container && return ${DS_NOT_FOUND} + + local unknown="Unknown" platform="" + if ec2_identify_platform "$unknown"; then + platform="$_RET" + else + warn "Failed to identify ec2 platform. Using '$unknown'." + platform=$unknown + fi + + debug 1 "ec2 platform is '$platform'." + if [ "$platform" != "$unknown" ]; then + return $DS_FOUND + fi + local default="true" if ec2_read_strict_setting "$default"; then strict="$_RET" -- cgit v1.2.3 From 5dd5b2cb539a84ed59f2b3181020d2bd18989718 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Feb 2017 14:19:20 -0500 Subject: Identify Brightbox as an Ec2 datasource user. Brightbox will identify their platform to the guest by setting the product serial to a string that ends with 'brightbox.com'. 
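As a rough illustration (not part of the patch itself), the serial-suffix test described here reduces to a simple shell pattern match; the serial value below is made up:

    # Illustrative only: the DMI product serial is checked for the
    # 'brightbox.com' suffix, which marks the platform as Brightbox.
    serial="srv-abc12.gb1.brightbox.com"   # hypothetical example value
    case "$serial" in
        *brightbox.com) echo "platform: Brightbox";;
        *) echo "platform: not Brightbox";;
    esac
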
LP: #1661693 --- cloudinit/sources/DataSourceEc2.py | 8 +++++++- tools/ds-identify | 5 +++++ 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 26da263a..c7df8060 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -30,6 +30,7 @@ STRICT_ID_DEFAULT = "warn" class Platforms(object): ALIYUN = "AliYun" AWS = "AWS" + BRIGHTBOX = "Brightbox" SEEDED = "Seeded" UNKNOWN = "Unknown" @@ -325,10 +326,15 @@ def identify_aws(data): return None +def identify_brightbox(data): + if data['serial'].endswith('brightbox.com'): + return Platforms.BRIGHTBOX + + def identify_platform(): # identify the platform and return an entry in Platforms. data = _collect_platform_data() - checks = (identify_aws, lambda x: Platforms.UNKNOWN) + checks = (identify_aws, identify_brightbox, lambda x: Platforms.UNKNOWN) for checker in checks: try: result = checker(data) diff --git a/tools/ds-identify b/tools/ds-identify index dfa856ff..c39956fc 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -639,6 +639,11 @@ ec2_identify_platform() { local default="$1" local serial="${DI_DMI_PRODUCT_SERIAL}" + # brightbox https://bugs.launchpad.net/cloud-init/+bug/1661693 + case "$serial" in + *brightbox.com) _RET="Brightbox"; return 0;; + esac + # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="$PATH_ROOT/sys/hypervisor/uuid" -- cgit v1.2.3 From 83606aecaae571ce8eb7d6499de028192d82f79b Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Feb 2017 15:13:15 -0500 Subject: tools/ds-identify: ovf identify vmware guest customization. cloud-init by default sets 'disable_vmware_customization' to True. So in ds-identify, we will enable the ovf datasource if: - virt is vmware - 'libdeployPkgPlugin.so' exists as installed by vmware-tools or open-vm-tools. - disable_vmware_customization is configured to True --- tools/ds-identify | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index c39956fc..34bf0643 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -559,10 +559,45 @@ dscheck_OpenNebula() { return ${DS_NOT_FOUND} } +ovf_vmware_guest_customization() { + # vmware guest customization + + # virt provider must be vmware + [ "${DI_VIRT}" = "vmware" ] || return 1 + + # we have to have the plugin to do vmware customization + local found="" pkg="" pre="/usr/lib" + for pkg in vmware-tools open-vm-tools; do + if [ -f "$pre/$pkg/plugins/vmsvc/libdeployPkgPlugin.so" ]; then + found="$pkg"; break; + fi + done + [ -n "$found" ] || return 1 + + # disable_vmware_customization defaults to False. + # any value then other than false means disabled. 
+ local key="disable_vmware_customization" + local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" + match="$bp.d/*[Oo][Vv][Ff]*.cfg" + if check_config "$key" "$match"; then + debug 2 "${_RET_fname} set $key to $_RET" + case "$_RET" in + 0|false|False) return 0;; + *) return;; + esac + fi + + return 1 +} + dscheck_OVF() { local p="" check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}" + if ovf_vmware_guest_customization; then + return ${DS_FOUND} + fi + has_cdrom || return ${DS_NOT_FOUND} # FIXME: currently just return maybe if there is a cdrom -- cgit v1.2.3 From 05afe04edbe4c28f2170194d226821c1e755ee2d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Feb 2017 11:35:35 -0500 Subject: tools/ds-identify: disable vmware_guest_customization by default. ovf_vmware_guest_customization defaults to true in cloud-init, meaning that such customization is disabled. We just missed a return value causing ovf_vmware_guest_customization to effectively default to on. Also, when looking for setting look at /etc/cloud/cloud.cfg. This had been omitted in interest of performance, but we should be looking there. --- tools/ds-identify | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index 34bf0643..e618963b 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -574,16 +574,17 @@ ovf_vmware_guest_customization() { done [ -n "$found" ] || return 1 - # disable_vmware_customization defaults to False. - # any value then other than false means disabled. + # vmware customization is disabled by default + # (disable_vmware_customization=true). If it is set to false, then + # user has requested customization. local key="disable_vmware_customization" local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" - match="$bp.d/*[Oo][Vv][Ff]*.cfg" + match="$bp $bp.d/*[Oo][Vv][Ff]*.cfg" if check_config "$key" "$match"; then debug 2 "${_RET_fname} set $key to $_RET" case "$_RET" in 0|false|False) return 0;; - *) return;; + *) return 1;; esac fi -- cgit v1.2.3 From 4bb60d517da45919310265fa241e1e76b63e97bd Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Feb 2017 11:38:34 -0500 Subject: tools/ds-identify: look at cloud.cfg when looking for ec2 strict_id. In the interest of speed I had skipped the parsing of /etc/cloud/cloud.cfg for the ec2 strict_id setting. In hindsight it seems reasonable for people to put settings there. --- tools/ds-identify | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index e618963b..9711a234 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -651,7 +651,7 @@ ec2_read_strict_setting() { # 3. look for the key 'strict_id' (datasource/Ec2/strict_id) local match="" bp="${PATH_CLOUD_CONFD}/cloud.cfg" - match="$bp.d/*[Ee][Cc]2*.cfg" + match="$bp $bp.d/*[Ee][Cc]2*.cfg" if check_config strict_id "$match"; then debug 2 "${_RET_fname} set strict_id to $_RET" return 0 -- cgit v1.2.3 From ce63e63d7aaf900bac4339503c5d79ff3bd03d18 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Feb 2017 16:55:02 -0500 Subject: Z99-cloud-locale-test.sh: install and make consistent. Modify upstream packaging to install this file, which was already installed in ubuntu packaging. Also, white space changes from tabs to spaces. Very few things in cloud-init are tabs now. Lastly, remove the executable bit on this as ait is not necessary. Scripts in /etc/profile.d do not have executable bit. 
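As a quick sanity check (illustrative, not part of the patch): files in /etc/profile.d are sourced by the login shell rather than executed, so a mode 0644 file still takes effect:

    # Hypothetical verification on an installed system:
    stat -c '%a %n' /etc/profile.d/Z99-cloud-locale-test.sh   # expect 644
    sh -c '. /etc/profile.d/Z99-cloud-locale-test.sh'         # sourcing needs no +x
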
--- packages/debian/rules.in | 1 + tools/Z99-cloud-locale-test.sh | 148 ++++++++++++++++++++--------------------- 2 files changed, 75 insertions(+), 74 deletions(-) mode change 100755 => 100644 tools/Z99-cloud-locale-test.sh (limited to 'tools') diff --git a/packages/debian/rules.in b/packages/debian/rules.in index 9b004357..3df6053a 100755 --- a/packages/debian/rules.in +++ b/packages/debian/rules.in @@ -11,6 +11,7 @@ override_dh_install: dh_install install -d debian/cloud-init/etc/rsyslog.d cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf + install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh override_dh_auto_test: ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS))) diff --git a/tools/Z99-cloud-locale-test.sh b/tools/Z99-cloud-locale-test.sh old mode 100755 new mode 100644 index 5912bae2..4978d87e --- a/tools/Z99-cloud-locale-test.sh +++ b/tools/Z99-cloud-locale-test.sh @@ -11,90 +11,90 @@ # of how to fix them. locale_warn() { - local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv="" - local w1 w2 w3 w4 remain + local bad_names="" bad_lcs="" key="" val="" var="" vars="" bad_kv="" + local w1 w2 w3 w4 remain - # if shell is zsh, act like sh only for this function (-L). - # The behavior change will not permenently affect user's shell. - [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh + # if shell is zsh, act like sh only for this function (-L). + # The behavior change will not permenently affect user's shell. + [ "${ZSH_NAME+zsh}" = "zsh" ] && emulate -L sh - # locale is expected to output either: - # VARIABLE= - # VARIABLE="value" - # locale: Cannot set LC_SOMETHING to default locale - while read -r w1 w2 w3 w4 remain; do - case "$w1" in - locale:) bad_names="${bad_names} ${w4}";; - *) - key=${w1%%=*} - val=${w1#*=} - val=${val#\"} - val=${val%\"} - vars="${vars} $key=$val";; - esac - done - for bad in $bad_names; do - for var in ${vars}; do - [ "${bad}" = "${var%=*}" ] || continue - val=${var#*=} - [ "${bad_lcs#* ${val}}" = "${bad_lcs}" ] && - bad_lcs="${bad_lcs} ${val}" - bad_kv="${bad_kv} $bad=$val" - break - done - done - bad_lcs=${bad_lcs# } - bad_kv=${bad_kv# } - [ -n "$bad_lcs" ] || return 0 + # locale is expected to output either: + # VARIABLE= + # VARIABLE="value" + # locale: Cannot set LC_SOMETHING to default locale + while read -r w1 w2 w3 w4 remain; do + case "$w1" in + locale:) bad_names="${bad_names} ${w4}";; + *) + key=${w1%%=*} + val=${w1#*=} + val=${val#\"} + val=${val%\"} + vars="${vars} $key=$val";; + esac + done + for bad in $bad_names; do + for var in ${vars}; do + [ "${bad}" = "${var%=*}" ] || continue + val=${var#*=} + [ "${bad_lcs#* ${val}}" = "${bad_lcs}" ] && + bad_lcs="${bad_lcs} ${val}" + bad_kv="${bad_kv} $bad=$val" + break + done + done + bad_lcs=${bad_lcs# } + bad_kv=${bad_kv# } + [ -n "$bad_lcs" ] || return 0 - printf "_____________________________________________________________________\n" - printf "WARNING! Your environment specifies an invalid locale.\n" - printf " The unknown environment variables are:\n %s\n" "$bad_kv" - printf " This can affect your user experience significantly, including the\n" - printf " ability to manage packages. You may install the locales by running:\n\n" + printf "_____________________________________________________________________\n" + printf "WARNING! 
Your environment specifies an invalid locale.\n" + printf " The unknown environment variables are:\n %s\n" "$bad_kv" + printf " This can affect your user experience significantly, including the\n" + printf " ability to manage packages. You may install the locales by running:\n\n" - local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED" - local pkgs="" - if [ -e "$sfile" ]; then - for bad in ${bad_lcs}; do - grep -q -i "${bad}" "$sfile" && - to_gen="${to_gen} ${bad}" || - invalid="${invalid} ${bad}" - done - else - printf " sudo apt-get install locales\n" - to_gen=$bad_lcs - fi - to_gen=${to_gen# } + local bad invalid="" to_gen="" sfile="/usr/share/i18n/SUPPORTED" + local pkgs="" + if [ -e "$sfile" ]; then + for bad in ${bad_lcs}; do + grep -q -i "${bad}" "$sfile" && + to_gen="${to_gen} ${bad}" || + invalid="${invalid} ${bad}" + done + else + printf " sudo apt-get install locales\n" + to_gen=$bad_lcs + fi + to_gen=${to_gen# } - local pkgs="" - for bad in ${to_gen}; do - pkgs="${pkgs} language-pack-${bad%%_*}" - done - pkgs=${pkgs# } + local pkgs="" + for bad in ${to_gen}; do + pkgs="${pkgs} language-pack-${bad%%_*}" + done + pkgs=${pkgs# } - if [ -n "${pkgs}" ]; then - printf " sudo apt-get install ${pkgs# }\n" - printf " or\n" - printf " sudo locale-gen ${to_gen# }\n" - printf "\n" - fi - for bad in ${invalid}; do - printf "WARNING: '${bad}' is an invalid locale\n" - done + if [ -n "${pkgs}" ]; then + printf " sudo apt-get install ${pkgs# }\n" + printf " or\n" + printf " sudo locale-gen ${to_gen# }\n" + printf "\n" + fi + for bad in ${invalid}; do + printf "WARNING: '${bad}' is an invalid locale\n" + done - printf "To see all available language packs, run:\n" - printf " apt-cache search \"^language-pack-[a-z][a-z]$\"\n" - printf "To disable this message for all users, run:\n" - printf " sudo touch /var/lib/cloud/instance/locale-check.skip\n" - printf "_____________________________________________________________________\n\n" + printf "To see all available language packs, run:\n" + printf " apt-cache search \"^language-pack-[a-z][a-z]$\"\n" + printf "To disable this message for all users, run:\n" + printf " sudo touch /var/lib/cloud/instance/locale-check.skip\n" + printf "_____________________________________________________________________\n\n" - # only show the message once - : > ~/.cloud-locale-test.skip 2>/dev/null || : + # only show the message once + : > ~/.cloud-locale-test.skip 2>/dev/null || : } [ -f ~/.cloud-locale-test.skip -o -f /var/lib/cloud/instance/locale-check.skip ] || - locale 2>&1 | locale_warn + locale 2>&1 | locale_warn unset locale_warn -# vi: ts=4 noexpandtab +# vi: ts=4 expandtab -- cgit v1.2.3 From 33518d7d62493c7d00e3792146399c9572abe915 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 28 Feb 2017 16:53:56 -0500 Subject: Add profile.d script for showing warnings on login. Z99-cloudinit-warnings.sh can be dropped into /etc/profile.d. Warnings that are written to /var/lib/cloud/instance/warnings will be displayed to the user on stderr when they log in. 
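For illustration (not part of the patch), the flow on a running instance looks like this; the warning file name below is arbitrary:

    # Stage a warning; it is printed to stderr at the next interactive login.
    echo "example: datasource could not be verified" | \
        sudo tee /var/lib/cloud/instance/warnings/example-warning >/dev/null
    # A user can opt out of further warnings with the per-user skip file
    # the script checks for:
    touch ~/.cloud-warnings.skip
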
--- packages/debian/rules.in | 1 + tools/Z99-cloudinit-warnings.sh | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 tools/Z99-cloudinit-warnings.sh (limited to 'tools') diff --git a/packages/debian/rules.in b/packages/debian/rules.in index 3df6053a..053b7649 100755 --- a/packages/debian/rules.in +++ b/packages/debian/rules.in @@ -12,6 +12,7 @@ override_dh_install: install -d debian/cloud-init/etc/rsyslog.d cp tools/21-cloudinit.conf debian/cloud-init/etc/rsyslog.d/21-cloudinit.conf install -D ./tools/Z99-cloud-locale-test.sh debian/cloud-init/etc/profile.d/Z99-cloud-locale-test.sh + install -D ./tools/Z99-cloudinit-warnings.sh debian/cloud-init/etc/profile.d/Z99-cloudinit-warnings.sh override_dh_auto_test: ifeq (,$(findstring nocheck,$(DEB_BUILD_OPTIONS))) diff --git a/tools/Z99-cloudinit-warnings.sh b/tools/Z99-cloudinit-warnings.sh new file mode 100644 index 00000000..b237786b --- /dev/null +++ b/tools/Z99-cloudinit-warnings.sh @@ -0,0 +1,30 @@ +#!/bin/sh +# This file is part of cloud-init. See LICENSE file for license information. + +# Purpose: show user warnings on login. + +cloud_init_warnings() { + local skipf="" warning="" idir="/var/lib/cloud/instance" n=0 + local warndir="$idir/warnings" + local ufile="$HOME/.cloud-warnings.skip" sfile="$warndir/.skip" + [ -d "$warndir" ] || return 0 + [ ! -f "$ufile" ] || return 0 + [ ! -f "$skipf" ] || return 0 + + for warning in "$warndir"/*; do + [ -f "$warning" ] || continue + cat "$warning" + n=$((n+1)) + done + [ $n -eq 0 ] && return 0 + echo "" + echo "Disable the warnings above by:" + echo " touch $ufile" + echo "or" + echo " touch $sfile" +} + +cloud_init_warnings 1>&2 +unset cloud_init_warnings + +# vi: syntax=sh ts=4 expandtab -- cgit v1.2.3 From 51a24555e5e7af709caa8dab1a5e6c7e7f317b17 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 1 Mar 2017 12:12:15 -0500 Subject: tools/ds-identify: make report mode write namespaced results. Now, when ds-identify runs in report mode, it still writes to /run/cloud-init.cfg as search does, but it will namespace the result under the top level 'di_report' entry. --- tools/ds-identify | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index 9711a234..fd2a46c8 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -10,8 +10,9 @@ # default setting is: # search,found=all,maybe=all,notfound=disable # -# report: write config to /run/cloud-init/cloud.cfg.report (instead of -# /run/cloud-init/cloud.cfg, which effectively makes this dry-run). +# report: write config to /run/cloud-init/cloud.cfg, but +# namespaced under 'di_report'. Thus cloud-init can still see +# the result, but has no affect. # enable: do nothing # ds-identify writes no config and just exits success # the caller (cloud-init-generator) then enables cloud-init to run @@ -867,15 +868,16 @@ _print_info() { } write_result() { - local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" - if [ "$DI_REPORT" = "true" ]; then - # if report is true, then we write to .report, but touch the other. - : > "$runcfg" - runcfg="$runcfg.report" - fi - for line in "$@"; do - echo "$line" - done > "$runcfg" + local runcfg="${PATH_RUN_CI_CFG}" ret="" line="" pre="" + { + if [ "$DI_REPORT" = "true" ]; then + echo "di_report:" + pre=" " + fi + for line in "$@"; do + echo "${pre}$line"; + done + } > "$runcfg" ret=$? 
[ $ret -eq 0 ] || { error "failed to write to ${runcfg}" @@ -956,6 +958,7 @@ _read_config() { if [ "$keyname" = "_unset" ]; then return 1 fi + _RET="" return 0 } -- cgit v1.2.3 From d914ed8e573d464c4d21aa41069beb73fd3ce9be Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 2 Mar 2017 21:45:47 -0500 Subject: ds-identify: record not found in cloud.cfg and always add None. On a 'not found' result, was not being written at all. That had the unintended effect of '--force' not working. Now, on a 'not found' result: - if reporting: write the list as found (with just 'None'). - if not reporting: only report that there was nothing found. this means that the warning cloud-init will write about ds-identify failing to find a datasource will be written, but cloud-init will still search its fully configured list. --- tools/ds-identify | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index fd2a46c8..741cf3ae 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -886,10 +886,23 @@ write_result() { return 0 } +record_notfound() { + # in report mode, report nothing was found. + # if not report mode: only report the negative result. + # reporting an empty list would mean cloud-init would not search + # any datasources. + if [ "$DI_REPORT" = "true" ]; then + found -- + else + local msg="# reporting not found result. notfound=${DI_ON_NOTFOUND}." + local DI_REPORT="true" + found -- "$msg" + fi +} + found() { # found(ds1, [ds2 ...], [-- [extra lines]]) local list="" ds="" - # always we write the None datasource last. while [ $# -ne 0 ]; do if [ "$1" = "--" ]; then shift @@ -902,6 +915,8 @@ found() { # do not pass an empty line through. shift fi + # always write the None datasource last. + list="${list:+${list}, }None" write_result "datasource_list: [ $list ]" "$@" return } @@ -1173,13 +1188,15 @@ _main() { return fi + # record the empty result. + record_notfound case "$DI_ON_NOTFOUND" in $DI_DISABLED) debug 1 "No result. notfound=$DI_DISABLED. returning $ret_dis." return $ret_dis ;; $DI_ENABLED) - debug 1 "notfound=$DI_ENABLED. returning $ret_en" + debug 1 "No result. notfound=$DI_ENABLED. returning $ret_en" return $ret_en;; esac -- cgit v1.2.3 From c81ea53bbdc4ada9d2b52430e106aeb3c38b4e0a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 2 Mar 2017 23:19:45 -0500 Subject: ds-identify: move default setting for Ec2/strict_id to a global. Rather than having the dscheck_Ec2 just know the setting, move it up to a more formal declaration. This will make it look more clean when a distro carries a patch to change it to warn. --- tools/ds-identify | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/ds-identify b/tools/ds-identify index 741cf3ae..d7b2a0b2 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -109,6 +109,7 @@ DI_ON_FOUND="" DI_ON_MAYBE="" DI_ON_NOTFOUND="" +DI_EC2_STRICT_ID_DEFAULT="true" error() { set -- "ERROR:" "$@"; @@ -721,7 +722,7 @@ dscheck_Ec2() { return $DS_FOUND fi - local default="true" + local default="${DI_EC2_STRICT_ID_DEFAULT}" if ec2_read_strict_setting "$default"; then strict="$_RET" else -- cgit v1.2.3
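To make the intent concrete (a sketch, not an actual downstream patch), a distro that wants the relaxed behavior now only needs to touch the single global introduced above:

    # Hypothetical downstream change: ship 'warn' instead of 'true'.
    sed -i 's/^DI_EC2_STRICT_ID_DEFAULT=.*/DI_EC2_STRICT_ID_DEFAULT="warn"/' \
        tools/ds-identify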