summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
Diffstat (limited to 'tools')
-rw-r--r--tools/.lp-to-git-user28
-rwxr-xr-xtools/build-on-freebsd73
-rwxr-xr-xtools/cloud-init-per8
-rwxr-xr-xtools/ds-identify136
-rwxr-xr-xtools/make-tarball12
-rwxr-xr-xtools/migrate-lp-user-to-github243
-rwxr-xr-xtools/read-version49
-rwxr-xr-xtools/render-cloudcfg3
-rwxr-xr-xtools/run-container54
-rwxr-xr-xtools/xkvm61
10 files changed, 556 insertions, 111 deletions
diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
new file mode 100644
index 00000000..6b20d360
--- /dev/null
+++ b/tools/.lp-to-git-user
@@ -0,0 +1,28 @@
+{
+ "adobrawy": "ad-m",
+ "afranceschini": "andreaf74",
+ "ahosmanmsft": "AOhassan",
+ "andreipoltavchenko": "pa-yourserveradmin-com",
+ "askon": "ask0n",
+ "bitfehler": "bitfehler",
+ "chad.smith": "blackboxsw",
+ "d-info-e": "do3meli",
+ "daniel-thewatkins": "OddBloke",
+ "eric-lafontaine1": "elafontaine",
+ "fredlefebvre": "fred-lefebvre",
+ "goneri": "goneri",
+ "harald-jensas": "hjensas",
+ "i.galic": "igalic",
+ "larsks": "larsks",
+ "legovini": "paride",
+ "louis": "karibou",
+ "madhuri-rai07": "madhuri-rai07",
+ "otubo": "otubo",
+ "pengpengs": "PengpengSun",
+ "powersj": "powersj",
+ "raharper": "raharper",
+ "rjschwei": "rjschwei",
+ "tribaal": "chrisglass",
+ "trstringer": "trstringer",
+ "xiaofengw": "xiaofengw-vmware"
+} \ No newline at end of file
diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd
index d23fde2b..876368a9 100755
--- a/tools/build-on-freebsd
+++ b/tools/build-on-freebsd
@@ -3,37 +3,42 @@
# installing cloud-init. This script takes care of building and installing. It
# will optionally make a first run at the end.
+set -eux
+
fail() { echo "FAILED:" "$@" 1>&2; exit 1; }
+PYTHON="${PYTHON:-python3}"
+if [ ! $(which ${PYTHON}) ]; then
+ echo "Please install python first."
+ exit 1
+fi
+py_prefix=$(${PYTHON} -c 'import sys; print("py%d%d" % (sys.version_info.major, sys.version_info.minor))')
+
# Check dependencies:
depschecked=/tmp/c-i.dependencieschecked
pkgs="
- bash
- dmidecode
- e2fsprogs
- py27-Jinja2
- py27-boto
- py27-cheetah
- py27-configobj
- py27-jsonpatch
- py27-jsonpointer
- py27-oauthlib
- py27-requests
- py27-serial
- py27-six
- py27-yaml
- python
- sudo
+ bash
+ dmidecode
+ e2fsprogs
+ $py_prefix-Jinja2
+ $py_prefix-boto
+ $py_prefix-configobj
+ $py_prefix-jsonpatch
+ $py_prefix-jsonpointer
+ $py_prefix-jsonschema
+ $py_prefix-oauthlib
+ $py_prefix-requests
+ $py_prefix-serial
+ $py_prefix-six
+ $py_prefix-yaml
+ sudo
"
-[ -f "$depschecked" ] || pkg install ${pkgs} || fail "install packages"
+[ -f "$depschecked" ] || pkg install --yes ${pkgs} || fail "install packages"
touch $depschecked
-# Required but unavailable port/pkg: py27-jsonpatch py27-jsonpointer
-# Luckily, the install step will take care of this by installing it from pypi...
-
# Build the code and install in /usr/local/:
-python setup.py build
-python setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd
+${PYTHON} setup.py build
+${PYTHON} setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd
# Enable cloud-init in /etc/rc.conf:
sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf
@@ -41,21 +46,21 @@ echo 'cloudinit_enable="YES"' >> /etc/rc.conf
echo "Installation completed."
-if [ "$1" = "run" ]; then
- echo "Ok, now let's see if it works."
+if [ "$#" -ge 1 ] && [ "$1" = "run" ]; then
+ echo "Ok, now let's see if it works."
- # Backup SSH keys
- mv /etc/ssh/ssh_host_* /tmp/
+ # Backup SSH keys
+ mv /etc/ssh/ssh_host_* /tmp/
- # Remove old metadata
- rm -rf /var/lib/cloud
+ # Remove old metadata
+ rm -rf /var/lib/cloud
- # Just log everything, quick&dirty
- rm /usr/local/etc/cloud/cloud.cfg.d/05_logging.cfg
+ # Just log everything, quick&dirty
+ rm /usr/local/etc/cloud/cloud.cfg.d/05_logging.cfg
- # Start:
- /usr/local/etc/rc.d/cloudinit start
+ # Start:
+ /usr/local/etc/rc.d/cloudinit start
- # Restore SSH keys
- mv /tmp/ssh_host_* /etc/ssh/
+ # Restore SSH keys
+ mv /tmp/ssh_host_* /etc/ssh/
fi
diff --git a/tools/cloud-init-per b/tools/cloud-init-per
index 7d6754b6..fcd1ea79 100755
--- a/tools/cloud-init-per
+++ b/tools/cloud-init-per
@@ -38,7 +38,7 @@ fi
[ "$1" = "-h" -o "$1" = "--help" ] && { Usage ; exit 0; }
[ $# -ge 3 ] || { Usage 1>&2; exit 1; }
freq=$1
-name=$2
+name=$(echo $2 | sed 's/-/_/g')
shift 2;
[ "${name#*/}" = "${name}" ] || fail "name cannot contain a /"
@@ -53,6 +53,12 @@ esac
[ -d "${sem%/*}" ] || mkdir -p "${sem%/*}" ||
fail "failed to make directory for ${sem}"
+# Rename legacy sem files with dashes in their names. Do not overwrite existing
+# sem files to prevent clobbering those which may have been created from calls
+# outside of cloud-init.
+sem_legacy=$(echo $sem | sed 's/_/-/g')
+[ "$sem" != "$sem_legacy" -a -e "$sem_legacy" ] && mv -n "$sem_legacy" "$sem"
+
[ "$freq" != "always" -a -e "$sem" ] && exit 0
"$@"
ret=$?
diff --git a/tools/ds-identify b/tools/ds-identify
index b78b2731..c93d4a77 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -124,7 +124,7 @@ DI_DSNAME=""
# be searched if there is no setting found in config.
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \
-OVF SmartOS Scaleway Hetzner IBMCloud Oracle"
+OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud"
DI_DSLIST=""
DI_MODE=""
DI_ON_FOUND=""
@@ -179,13 +179,39 @@ debug() {
echo "$@" 1>&3
}
+dmi_decode() {
+ local sys_field="$1" dmi_field="" val=""
+ command -v dmidecode >/dev/null 2>&1 || {
+ warn "No dmidecode program. Cannot read $sys_field."
+ return 1
+ }
+ case "$1" in
+ sys_vendor) dmi_field="system-manufacturer";;
+ product_name) dmi_field="system-product-name";;
+ product_uuid) dmi_field="system-uuid";;
+ product_serial) dmi_field="system-serial-number";;
+ chassis_asset_tag) dmi_field="chassis-asset-tag";;
+ *) error "Unknown field $sys_field. Cannot call dmidecode."
+ return 1;;
+ esac
+ val=$(dmidecode --quiet "--string=$dmi_field" 2>/dev/null) || return 1
+ _RET="$val"
+}
+
get_dmi_field() {
local path="${PATH_SYS_CLASS_DMI_ID}/$1"
- if [ ! -f "$path" ] || [ ! -r "$path" ]; then
- _RET="$UNAVAILABLE"
+ _RET="$UNAVAILABLE"
+ if [ -d "${PATH_SYS_CLASS_DMI_ID}" ]; then
+ if [ -f "$path" ] && [ -r "$path" ]; then
+ read _RET < "${path}" || _RET="$ERROR"
+ return
+ fi
+ # if `/sys/class/dmi/id` exists, but not the object we're looking for,
+ # do *not* fallback to dmidecode!
return
fi
- read _RET < "${path}" || _RET="$ERROR"
+ dmi_decode "$1" || _RET="$ERROR"
+ return
}
block_dev_with_label() {
@@ -267,6 +293,31 @@ detect_virt() {
if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then
virt="$out"
fi
+ elif [ "$DI_UNAME_KERNEL_NAME" = "FreeBSD" ]; then
+        # Map FreeBSD's vm_guest names to the systemd-detect-virt names
+        # where they don't match up. See
+ # https://github.com/freebsd/freebsd/blob/master/sys/kern/subr_param.c#L144-L160
+ # https://www.freedesktop.org/software/systemd/man/systemd-detect-virt.html
+ #
+ # systemd | kern.vm_guest
+ # ---------------------+---------------
+ # none | none
+ # kvm | kvm
+ # vmware | vmware
+ # microsoft | hv
+ # oracle | vbox
+ # xen | xen
+ # parallels | parallels
+ # bhyve | bhyve
+ # vm-other | generic
+ out=$(sysctl -qn kern.vm_guest 2>/dev/null) && {
+ case "$out" in
+ hv) virt="microsoft" ;;
+ vbox) virt="oracle" ;;
+                generic) virt="vm-other";;
+ *) virt="$out"
+ esac
+ }
fi
_RET="$virt"
}
@@ -553,6 +604,11 @@ dscheck_CloudStack() {
return $DS_NOT_FOUND
}
+dscheck_Exoscale() {
+ dmi_product_name_matches "Exoscale*" && return $DS_FOUND
+ return $DS_NOT_FOUND
+}
+
dscheck_CloudSigma() {
# http://paste.ubuntu.com/23624795/
dmi_product_name_matches "CloudSigma" && return $DS_FOUND
@@ -620,7 +676,7 @@ dscheck_MAAS() {
}
dscheck_NoCloud() {
- local fslabel="cidata" d=""
+ local fslabel="cidata CIDATA" d=""
case " ${DI_KERNEL_CMDLINE} " in
*\ ds=nocloud*) return ${DS_FOUND};;
esac
@@ -632,9 +688,10 @@ dscheck_NoCloud() {
check_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
done
- if has_fs_with_label "${fslabel}"; then
+ if has_fs_with_label $fslabel; then
return ${DS_FOUND}
fi
+
return ${DS_NOT_FOUND}
}
@@ -696,6 +753,11 @@ dscheck_OpenNebula() {
return ${DS_NOT_FOUND}
}
+dscheck_RbxCloud() {
+ has_fs_with_label "CLOUDMD" "cloudmd" && return ${DS_FOUND}
+ return ${DS_NOT_FOUND}
+}
+
ovf_vmware_guest_customization() {
# vmware guest customization
@@ -762,13 +824,37 @@ is_cdrom_ovf() {
# explicitly skip known labels of other types. rd_rdfe is azure.
case "$label" in
- config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;;
+ config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;;
esac
+    # skip devices whose size is 10MB or larger
+ local size="" sfile="${PATH_SYS_CLASS_BLOCK}/${dev##*/}/size"
+ [ -f "$sfile" ] || return 1
+ read size <"$sfile" || { warn "failed reading from $sfile"; return 1; }
+ # size is in 512 byte units. so convert to MB (integer division)
+ if [ $((size/2048)) -ge 10 ]; then
+ debug 2 "$dev: size $((size/2048))MB is considered too large for OVF"
+ return 1
+ fi
+
local idstr="http://schemas.dmtf.org/ovf/environment/1"
grep --quiet --ignore-case "$idstr" "${PATH_ROOT}$dev"
}
+has_ovf_cdrom() {
+ # DI_ISO9660_DEVS is <device>=label,<device>=label2
+ # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces
+ if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then
+ local oifs="$IFS"
+ # shellcheck disable=2086
+ { IFS=","; set -- ${DI_ISO9660_DEVS}; IFS="$oifs"; }
+ for tok in "$@"; do
+ is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return 0
+ done
+ fi
+ return 1
+}
+
dscheck_OVF() {
check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}"
@@ -779,20 +865,9 @@ dscheck_OVF() {
ovf_vmware_transport_guestinfo && return "${DS_FOUND}"
- # DI_ISO9660_DEVS is <device>=label,<device>=label2
- # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces
- if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then
- local oifs="$IFS"
- # shellcheck disable=2086
- { IFS=","; set -- ${DI_ISO9660_DEVS}; IFS="$oifs"; }
- for tok in "$@"; do
- is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return $DS_FOUND
- done
- fi
+ has_ovf_cdrom && return "${DS_FOUND}"
- if ovf_vmware_guest_customization; then
- return ${DS_FOUND}
- fi
+ ovf_vmware_guest_customization && return "${DS_FOUND}"
return ${DS_NOT_FOUND}
}
@@ -872,9 +947,18 @@ ec2_identify_platform() {
local default="$1"
local serial="${DI_DMI_PRODUCT_SERIAL}"
- # brightbox https://bugs.launchpad.net/cloud-init/+bug/1661693
case "$serial" in
- *brightbox.com) _RET="Brightbox"; return 0;;
+ *.brightbox.com) _RET="Brightbox"; return 0;;
+ esac
+
+ local asset_tag="${DI_DMI_CHASSIS_ASSET_TAG}"
+ case "$asset_tag" in
+ *.zstack.io) _RET="ZStack"; return 0;;
+ esac
+
+ local vendor="${DI_DMI_SYS_VENDOR}"
+ case "$vendor" in
+ e24cloud) _RET="E24cloud"; return 0;;
esac
# AWS http://docs.aws.amazon.com/AWSEC2/
@@ -978,6 +1062,14 @@ dscheck_OpenStack() {
return ${DS_FOUND}
fi
+ # LP: #1669875 : allow identification of OpenStack by asset tag
+ if dmi_chassis_asset_tag_matches "$nova"; then
+ return ${DS_FOUND}
+ fi
+ if dmi_chassis_asset_tag_matches "$compute"; then
+ return ${DS_FOUND}
+ fi
+
# LP: #1715241 : arch other than intel are not identified properly.
case "$DI_UNAME_MACHINE" in
i?86|x86_64) :;;
diff --git a/tools/make-tarball b/tools/make-tarball
index 8d540139..462e7d04 100755
--- a/tools/make-tarball
+++ b/tools/make-tarball
@@ -15,24 +15,27 @@ Usage: ${0##*/} [revision]
options:
-h | --help print usage
-o | --output FILE write to file
+ --version VERSION Set the version used in the tarball. Default value is determined with 'git describe'.
--orig-tarball Write file cloud-init_<version>.orig.tar.gz
--long Use git describe --long for versioning
EOF
}
short_opts="ho:v"
-long_opts="help,output:,orig-tarball,long"
+long_opts="help,output:,version:,orig-tarball,long"
getopt_out=$(getopt --name "${0##*/}" \
--options "${short_opts}" --long "${long_opts}" -- "$@") &&
eval set -- "${getopt_out}" || { Usage 1>&2; exit 1; }
long_opt=""
orig_opt=""
+version=""
while [ $# -ne 0 ]; do
cur=$1; next=$2
case "$cur" in
-h|--help) Usage; exit 0;;
-o|--output) output=$next; shift;;
+ --version) version=$next; shift;;
--long) long_opt="--long";;
--orig-tarball) orig_opt=".orig";;
--) shift; break;;
@@ -41,7 +44,12 @@ while [ $# -ne 0 ]; do
done
rev=${1:-HEAD}
-version=$(git describe --abbrev=8 "--match=[0-9]*" ${long_opt} $rev)
+if [ -z "$version" ]; then
+ version=$(git describe --abbrev=8 "--match=[0-9]*" ${long_opt} $rev)
+elif [ ! -z "$long_opt" ]; then
+    echo "ERROR: --long has no effect when --version is passed" >&2
+ exit 1
+fi
archive_base="cloud-init-$version"
if [ -z "$output" ]; then
diff --git a/tools/migrate-lp-user-to-github b/tools/migrate-lp-user-to-github
new file mode 100755
index 00000000..f1247cb3
--- /dev/null
+++ b/tools/migrate-lp-user-to-github
@@ -0,0 +1,243 @@
+#!/usr/bin/env python3
+"""Link your Launchpad user to github, proposing branches to LP and Github"""
+
+from argparse import ArgumentParser
+from subprocess import Popen, PIPE
+import os
+import sys
+
+try:
+ from launchpadlib.launchpad import Launchpad
+except ImportError:
+ print("Missing python launchpadlib dependency to create branches for you."
+          " Install with: sudo apt-get install python3-launchpadlib")
+ sys.exit(1)
+
+if "avoid-pep8-E402-import-not-top-of-file":
+ _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ sys.path.insert(0, _tdir)
+ from cloudinit import util
+
+
+DRYRUN = False
+LP_TO_GIT_USER_FILE='.lp-to-git-user'
+MIGRATE_BRANCH_NAME='migrate-lp-to-github'
+GITHUB_PULL_URL='https://github.com/canonical/cloud-init/compare/master...{github_user}:{branch}'
+GH_UPSTREAM_URL='https://github.com/canonical/cloud-init'
+
+
+def error(message):
+ if isinstance(message, bytes):
+ message = message.decode('utf-8')
+ log('ERROR: {error}'.format(error=message))
+ sys.exit(1)
+
+
+def log(message):
+ print(message)
+
+
+def subp(cmd, skip=False):
+ prefix = 'SKIPPED: ' if skip else '$ '
+ log('{prefix}{command}'.format(prefix=prefix, command=' '.join(cmd)))
+ if skip:
+ return
+ proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
+ out, err = proc.communicate()
+ if proc.returncode:
+ error(err if err else out)
+ return out.decode('utf-8')
+
+
+LP_GIT_PATH_TMPL = 'git+ssh://{launchpad_user}@git.launchpad.net/'
+LP_UPSTREAM_PATH_TMPL = LP_GIT_PATH_TMPL + 'cloud-init'
+LP_REMOTE_PATH_TMPL = LP_GIT_PATH_TMPL + '~{launchpad_user}/cloud-init'
+GITHUB_REMOTE_PATH_TMPL = 'git@github.com:{github_user}/cloud-init.git'
+
+
+# Comment templates
+COMMIT_MSG_TMPL = '''\
+lp-to-git-users: adding {gh_username}
+
+Mapped from {lp_username}
+'''
+PUBLISH_DIR='/tmp/cloud-init-lp-to-github-migration'
+
+def get_parser():
+ parser = ArgumentParser(description=__doc__)
+ parser.add_argument(
+ '--dryrun', required=False, default=False, action='store_true',
+ help=('Run commands and review operation in dryrun mode, '
+              'making no changes.'))
+ parser.add_argument('launchpad_user', help='Your launchpad username.')
+ parser.add_argument('github_user', help='Your github username.')
+ parser.add_argument(
+ '--local-repo-dir', required=False, dest='repo_dir',
+ help=('The name of the local directory into which we clone.'
+ ' Default: {}'.format(PUBLISH_DIR)))
+ parser.add_argument(
+ '--upstream-branch', required=False, dest='upstream',
+ default='origin/master',
+ help=('The name of remote branch target into which we will merge.'
+ ' Default: origin/master'))
+ parser.add_argument(
+ '-v', '--verbose', required=False, default=False, action='store_true',
+ help=('Print all actions.'))
+ return parser
+
+
+def create_publish_branch(upstream, publish_branch):
+ '''Create clean publish branch target in the current git repo.'''
+ branches = subp(['git', 'branch'])
+ upstream_remote, upstream_branch = upstream.split('/', 1)
+ subp(['git', 'checkout', upstream_branch])
+ subp(['git', 'pull'])
+ if publish_branch in branches:
+ subp(['git', 'branch', '-D', publish_branch])
+ subp(['git', 'checkout', upstream, '-b', publish_branch])
+
+
+def add_lp_and_github_remotes(lp_user, gh_user):
+ """Add lp and github remotes if not present.
+
+ @return Tuple with (lp_remote_name, gh_remote_name)
+ """
+ lp_remote = LP_REMOTE_PATH_TMPL.format(launchpad_user=lp_user)
+ gh_remote = GITHUB_REMOTE_PATH_TMPL.format(github_user=gh_user)
+ remotes = subp(['git', 'remote', '-v'])
+ lp_remote_name = gh_remote_name = None
+ for remote in remotes.splitlines():
+ if not remote:
+ continue
+ remote_name, remote_url, _operation = remote.split()
+ if lp_remote == remote_url:
+ lp_remote_name = remote_name
+ elif gh_remote == remote_url:
+ gh_remote_name = remote_name
+ if not lp_remote_name:
+ log("launchpad: Creating git remote launchpad-{} to point at your"
+ " LP repo".format(lp_user))
+ lp_remote_name = 'launchpad-{}'.format(lp_user)
+ subp(['git', 'remote', 'add', lp_remote_name, lp_remote])
+ try:
+ subp(['git', 'fetch', lp_remote_name])
+ except:
+ log("launchpad: Pushing to ensure LP repo exists")
+ subp(['git', 'push', lp_remote_name, 'master:master'])
+ subp(['git', 'fetch', lp_remote_name])
+ if not gh_remote_name:
+ log("github: Creating git remote github-{} to point at your"
+ " GH repo".format(gh_user))
+ gh_remote_name = 'github-{}'.format(gh_user)
+ subp(['git', 'remote', 'add', gh_remote_name, gh_remote])
+ try:
+ subp(['git', 'fetch', gh_remote_name])
+ except:
+ log("ERROR: [github] Could not fetch remote '{remote}'."
+ "Please create a fork for your github user by clicking 'Fork'"
+ " from {gh_upstream}".format(
+ remote=gh_remote, gh_upstream=GH_UPSTREAM_URL))
+ sys.exit(1)
+ return (lp_remote_name, gh_remote_name)
+
+
+def create_migration_branch(
+ branch_name, upstream, lp_user, gh_user, commit_msg):
+ """Create an LP to Github migration branch and add lp_user->gh_user."""
+ log("Creating a migration branch: {} adding your users".format(
+ MIGRATE_BRANCH_NAME))
+ create_publish_branch(upstream, MIGRATE_BRANCH_NAME)
+ lp_to_git_map = {}
+ lp_to_git_file = os.path.join(os.getcwd(), 'tools', LP_TO_GIT_USER_FILE)
+ if os.path.exists(lp_to_git_file):
+ with open(lp_to_git_file) as stream:
+ lp_to_git_map = util.load_json(stream.read())
+ if gh_user in lp_to_git_map.values():
+ raise RuntimeError(
+ "github user '{}' already in {}".format(gh_user, lp_to_git_file))
+ if lp_user in lp_to_git_map:
+ raise RuntimeError(
+ "launchpad user '{}' already in {}".format(
+ lp_user, lp_to_git_file))
+ lp_to_git_map[lp_user] = gh_user
+ with open(lp_to_git_file, 'w') as stream:
+ stream.write(util.json_dumps(lp_to_git_map))
+ subp(['git', 'add', lp_to_git_file])
+ commit_file = os.path.join(os.path.dirname(os.getcwd()), 'commit.msg')
+ with open(commit_file, 'wb') as stream:
+ stream.write(commit_msg.encode('utf-8'))
+ subp(['git', 'commit', '--all', '-F', commit_file])
+
+
+def main():
+ global DRYRUN
+ global VERBOSITY
+ parser = get_parser()
+ args = parser.parse_args()
+ DRYRUN = args.dryrun
+ VERBOSITY = 1 if args.verbose else 0
+ repo_dir = args.repo_dir or PUBLISH_DIR
+ if not os.path.exists(repo_dir):
+ cleanup_repo_dir = True
+ subp(['git', 'clone',
+ LP_UPSTREAM_PATH_TMPL.format(launchpad_user=args.launchpad_user),
+ repo_dir])
+ else:
+ cleanup_repo_dir = False
+ cwd = os.getcwd()
+ os.chdir(repo_dir)
+ log("Syncing master branch with upstream")
+ subp(['git', 'checkout', 'master'])
+ subp(['git', 'pull'])
+ try:
+ lp_remote_name, gh_remote_name = add_lp_and_github_remotes(
+ args.launchpad_user, args.github_user)
+ commit_msg = COMMIT_MSG_TMPL.format(
+ gh_username=args.github_user, lp_username=args.launchpad_user)
+ create_migration_branch(
+ MIGRATE_BRANCH_NAME, args.upstream, args.launchpad_user,
+ args.github_user, commit_msg)
+
+ for push_remote in (lp_remote_name, gh_remote_name):
+ subp(['git', 'push', push_remote, MIGRATE_BRANCH_NAME, '--force'])
+ except Exception as e:
+ error('Failed setting up migration branches: {0}'.format(e))
+ finally:
+ os.chdir(cwd)
+ if cleanup_repo_dir and os.path.exists(repo_dir):
+ util.del_dir(repo_dir)
+ # Make merge request on LP
+ log("[launchpad] Automatically creating merge proposal using launchpadlib")
+ lp = Launchpad.login_with(
+ "server-team github-migration tool", 'production', version='devel')
+ master = lp.git_repositories.getByPath(
+ path='cloud-init').getRefByPath(path='master')
+ LP_BRANCH_PATH='~{launchpad_user}/cloud-init/+git/cloud-init'
+ lp_git_repo = lp.git_repositories.getByPath(
+ path=LP_BRANCH_PATH.format(launchpad_user=args.launchpad_user))
+ lp_user_migrate_branch = lp_git_repo.getRefByPath(
+ path='refs/heads/migrate-lp-to-github')
+ lp_merge_url = (
+ 'https://code.launchpad.net/' +
+ LP_BRANCH_PATH.format(launchpad_user=args.launchpad_user) +
+ '/+ref/' + MIGRATE_BRANCH_NAME)
+ try:
+ lp_user_migrate_branch.createMergeProposal(
+ commit_message=commit_msg, merge_target=master, needs_review=True)
+ except Exception:
+ log('[launchpad] active merge proposal already exists at:\n'
+ '{url}\n'.format(url=lp_merge_url))
+ else:
+ log("[launchpad] Merge proposal created at:\n{url}.\n".format(
+ url=lp_merge_url))
+ log("To link your account to github open your browser and"
+ " click 'Create pull request' at the following URL:\n"
+ "{url}".format(url=GITHUB_PULL_URL.format(
+ github_user=args.github_user, branch=MIGRATE_BRANCH_NAME)))
+ if os.path.exists(repo_dir):
+ util.del_dir(repo_dir)
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/tools/read-version b/tools/read-version
index 06fd61a8..92e9fc96 100755
--- a/tools/read-version
+++ b/tools/read-version
@@ -45,14 +45,58 @@ def which(program):
return None
+def is_gitdir(path):
+ # Return boolean indicating if path is a git tree.
+ git_meta = os.path.join(path, '.git')
+ if os.path.isdir(git_meta):
+ return True
+ if os.path.exists(git_meta):
+ # in a git worktree, .git is a file with 'gitdir: x'
+ with open(git_meta, "rb") as fp:
+ if b'gitdir:' in fp.read():
+ return True
+ return False
+
+
use_long = '--long' in sys.argv or os.environ.get('CI_RV_LONG')
use_tags = '--tags' in sys.argv or os.environ.get('CI_RV_TAGS')
output_json = '--json' in sys.argv
src_version = ci_version.version_string()
version_long = None
-version = src_version
-version_long = None
+
+# If we're performing CI for a new release branch (which our tooling creates
+# with an "upstream/" prefix), then we don't want to enforce strict version
+# matching because we know it will fail.
+is_release_branch_ci = (
+ os.environ.get("TRAVIS_PULL_REQUEST_BRANCH", "").startswith("upstream/")
+)
+if is_gitdir(_tdir) and which("git") and not is_release_branch_ci:
+ flags = []
+ if use_tags:
+ flags = ['--tags']
+ cmd = ['git', 'describe', '--abbrev=8', '--match=[0-9]*'] + flags
+
+ try:
+ version = tiny_p(cmd).strip()
+ except RuntimeError:
+ version = None
+
+ if version is None or not version.startswith(src_version):
+ sys.stderr.write("git describe version (%s) differs from "
+ "cloudinit.version (%s)\n" % (version, src_version))
+ sys.stderr.write(
+ "Please get the latest upstream tags.\n"
+ "As an example, this can be done with the following:\n"
+ "$ git remote add upstream https://git.launchpad.net/cloud-init\n"
+ "$ git fetch upstream --tags\n"
+ )
+ sys.exit(1)
+
+ version_long = tiny_p(cmd + ["--long"]).strip()
+else:
+ version = src_version
+ version_long = None
# version is X.Y.Z[+xxx.gHASH]
# version_long is None or X.Y.Z-xxx-gHASH
@@ -75,6 +119,7 @@ data = {
'extra': extra,
'commit': commit,
'distance': distance,
+ 'is_release_branch_ci': is_release_branch_ci,
}
if output_json:
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index 8b7cb875..3d5fa725 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -4,7 +4,8 @@ import argparse
import os
import sys
-VARIANTS = ["bsd", "centos", "fedora", "rhel", "suse", "ubuntu", "unknown"]
+VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd", "rhel",
+ "suse", "ubuntu", "unknown"]
if "avoid-pep8-E402-import-not-top-of-file":
_tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
diff --git a/tools/run-container b/tools/run-container
index 6dedb757..23243474 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -35,9 +35,6 @@ Usage: ${0##*/} [ options ] [images:]image-ref
tested. Inside container, changes are in
local-changes.diff.
-k | --keep keep container after tests
- --pyexe V python version to use. Default=auto.
- Should be name of an executable.
- ('python2' or 'python3')
-p | --package build a binary package (.deb or .rpm)
-s | --source-package build source package (debuild -S or srpm)
-u | --unittest run unit tests
@@ -262,32 +259,23 @@ prep() {
# we need some very basic things not present in the container.
# - git
# - tar (CentOS 6 lxc container does not have it)
- # - python-argparse (or python3)
+ # - python3
local needed="" pair="" pkg="" cmd="" needed=""
local pairs="tar:tar git:git"
- local pyexe="$1"
get_os_info
- local py2pkg="python2" py3pkg="python3"
+ local py3pkg="python3"
case "$OS_NAME" in
opensuse)
- py2pkg="python-base"
py3pkg="python3-base";;
esac
- case "$pyexe" in
- python2) pairs="$pairs python2:$py2pkg";;
- python3) pairs="$pairs python3:$py3pkg";;
- esac
+ pairs="$pairs python3:$py3pkg"
for pair in $pairs; do
pkg=${pair#*:}
cmd=${pair%%:*}
command -v "$cmd" >/dev/null 2>&1 || needed="${needed} $pkg"
done
- if [ "$OS_NAME" = "centos" -a "$pyexe" = "python2" ]; then
- python -c "import argparse" >/dev/null 2>&1 ||
- needed="${needed} python-argparse"
- fi
needed=${needed# }
if [ -z "$needed" ]; then
error "No prep packages needed"
@@ -300,15 +288,7 @@ prep() {
}
nose() {
- local pyexe="$1" cmd=""
- shift
- get_os_info
- if [ "$OS_NAME/$OS_VERSION" = "centos/6" ]; then
- cmd="nosetests"
- else
- cmd="$pyexe -m nose"
- fi
- ${cmd} "$@"
+ python3 -m nose "$@"
}
is_done_cloudinit() {
@@ -367,12 +347,13 @@ wait_for_boot() {
run_self_inside "$name" wait_inside "$name" "$wtime" "$VERBOSITY" ||
{ errorrc "wait inside $name failed."; return; }
- if [ ! -z "${http_proxy-}" ]; then
+ if [ -n "${http_proxy-}" ]; then
if [ "$OS_NAME" = "centos" ]; then
debug 1 "configuring proxy ${http_proxy}"
inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
inside "$name" sed -i s/enabled=1/enabled=0/ \
/etc/yum/pluginconf.d/fastestmirror.conf
+ inside "$name" sh -c "sed -i '/^#baseurl=/s/#// ; s/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo"
else
debug 1 "do not know how to configure proxy on $OS_NAME"
fi
@@ -410,7 +391,7 @@ run_self_inside_as_cd() {
main() {
local short_opts="a:hknpsuv"
- local long_opts="artifacts:,dirty,help,keep,name:,pyexe:,package,source-package,unittest,verbose"
+ local long_opts="artifacts:,dirty,help,keep,name:,package,source-package,unittest,verbose"
local getopt_out=""
getopt_out=$(getopt --name "${0##*/}" \
--options "${short_opts}" --long "${long_opts}" -- "$@") &&
@@ -419,7 +400,7 @@ main() {
local cur="" next=""
local package=false srcpackage=false unittest="" name=""
- local dirty=false pyexe="auto" artifact_d="."
+ local dirty=false artifact_d="."
while [ $# -ne 0 ]; do
cur="${1:-}"; next="${2:-}";
@@ -429,7 +410,6 @@ main() {
-h|--help) Usage ; exit 0;;
-k|--keep) KEEP=true;;
-n|--name) name="$next"; shift;;
- --pyexe) pyexe=$next; shift;;
-p|--package) package=true;;
-s|--source-package) srcpackage=true;;
-u|--unittest) unittest=1;;
@@ -469,16 +449,8 @@ main() {
get_os_info_in "$name" ||
{ errorrc "failed to get os_info in $name"; return; }
- if [ "$pyexe" = "auto" ]; then
- case "$OS_NAME/$OS_VERSION" in
- centos/*|opensuse/*) pyexe=python2;;
- *) pyexe=python3;;
- esac
- debug 1 "set pyexe=$pyexe for $OS_NAME/$OS_VERSION"
- fi
-
# prep the container (install very basic dependencies)
- run_self_inside "$name" prep "$pyexe" ||
+ run_self_inside "$name" prep ||
{ errorrc "Failed to prep container $name"; return; }
# add the user
@@ -492,7 +464,7 @@ main() {
}
inside_as_cd "$name" root "$cdir" \
- $pyexe ./tools/read-dependencies "--distro=${OS_NAME}" \
+ python3 ./tools/read-dependencies "--distro=${OS_NAME}" \
--test-distro || {
errorrc "FAIL: failed to install dependencies with read-dependencies"
return
@@ -506,7 +478,7 @@ main() {
if [ -n "$unittest" ]; then
debug 1 "running unit tests."
- run_self_inside_as_cd "$name" "$user" "$cdir" nose "$pyexe" \
+ run_self_inside_as_cd "$name" "$user" "$cdir" nose \
tests/unittests cloudinit/ || {
errorrc "nosetests failed.";
errors[${#errors[@]}]="nosetests"
@@ -536,7 +508,7 @@ main() {
}
debug 1 "building source package with $build_srcpkg."
# shellcheck disable=SC2086
- inside_as_cd "$name" "$user" "$cdir" $pyexe $build_srcpkg || {
+ inside_as_cd "$name" "$user" "$cdir" python3 $build_srcpkg || {
errorrc "failed: $build_srcpkg";
errors[${#errors[@]}]="source package"
}
@@ -549,7 +521,7 @@ main() {
}
debug 1 "building binary package with $build_pkg."
# shellcheck disable=SC2086
- inside_as_cd "$name" "$user" "$cdir" $pyexe $build_pkg || {
+ inside_as_cd "$name" "$user" "$cdir" python3 $build_pkg || {
errorrc "failed: $build_pkg";
errors[${#errors[@]}]="binary package"
}
diff --git a/tools/xkvm b/tools/xkvm
index a30ba916..8d44cad7 100755
--- a/tools/xkvm
+++ b/tools/xkvm
@@ -1,4 +1,6 @@
#!/bin/bash
+# This file is part of cloud-init.
+# See LICENSE file for copyright and license info.
set -f
@@ -11,6 +13,8 @@ TAPDEVS=( )
# OVS_CLEANUP gets populated with bridge:devname pairs used with ovs
OVS_CLEANUP=( )
MAC_PREFIX="52:54:00:12:34"
+# allow this to be set externally.
+_QEMU_SUPPORTS_FILE_LOCKING="${_QEMU_SUPPORTS_FILE_LOCKING}"
KVM="kvm"
declare -A KVM_DEVOPTS
@@ -119,6 +123,21 @@ isdevopt() {
return 1
}
+qemu_supports_file_locking() {
+ # hackily check if qemu has file.locking in -drive params (LP: #1716028)
+ if [ -z "$_QEMU_SUPPORTS_FILE_LOCKING" ]; then
+ # The only way we could find to check presense of file.locking is
+ # qmp (query-qmp-schema). Simply checking if the virtio-blk driver
+ # supports 'share-rw' is expected to be equivalent and simpler.
+ isdevopt virtio-blk share-rw &&
+ _QEMU_SUPPORTS_FILE_LOCKING=true ||
+ _QEMU_SUPPORTS_FILE_LOCKING=false
+ debug 1 "qemu supports file locking = ${_QEMU_SUPPORTS_FILE_LOCKING}"
+ fi
+ [ "$_QEMU_SUPPORTS_FILE_LOCKING" = "true" ]
+ return
+}
+
padmac() {
# return a full mac, given a subset.
# assume whatever is input is the last portion to be
@@ -367,7 +386,7 @@ main() {
[ ${#netdevs[@]} -eq 0 ] && netdevs=( "${DEF_BRIDGE}" )
pt=( "$@" )
- local kvm_pkg="" virtio_scsi_bus="virtio-scsi-pci"
+ local kvm_pkg="" virtio_scsi_bus="virtio-scsi-pci" virtio_rng_device="virtio-rng-pci"
[ -n "$kvm" ] && kvm_pkg="none"
case $(uname -m) in
i?86)
@@ -382,7 +401,10 @@ main() {
[ -n "$kvm" ] ||
{ kvm="qemu-system-s390x"; kvm_pkg="qemu-system-misc"; }
def_netmodel=${DEF_NETMODEL:-"virtio-net-ccw"}
+ # disable virtio-scsi-bus
virtio_scsi_bus="virtio-scsi-ccw"
+ virtio_blk_bus="virtio-blk-ccw"
+ virtio_rng_device="virtio-rng-ccw"
;;
ppc64*)
[ -n "$kvm" ] ||
@@ -408,7 +430,7 @@ main() {
bios_opts=( "${_RET[@]}" )
local out="" fmt="" bus="" unit="" index="" serial="" driver="" devopts=""
- local busorindex="" driveopts="" cur="" val="" file=""
+ local busorindex="" driveopts="" cur="" val="" file="" wwn=""
for((i=0;i<${#diskdevs[@]};i++)); do
cur=${diskdevs[$i]}
IFS=","; set -- $cur; IFS="$oifs"
@@ -420,6 +442,7 @@ main() {
unit=""
index=""
serial=""
+ wwn=""
for tok in "$@"; do
[ "${tok#*=}" = "${tok}" -a -f "${tok}" -a -z "$file" ] && file="$tok"
val=${tok#*=}
@@ -433,6 +456,7 @@ main() {
file=*) file=$val;;
fmt=*|format=*) fmt=$val;;
serial=*) serial=$val;;
+ wwn=*) wwn=$val;;
bus=*) bus=$val;;
unit=*) unit=$val;;
index=*) index=$val;;
@@ -443,14 +467,19 @@ main() {
out=$(LANG=C qemu-img info "$file") &&
fmt=$(echo "$out" | awk '$0 ~ /^file format:/ { print $3 }') ||
{ error "failed to determine format of $file"; return 1; }
- else
+ elif [ -z "$fmt" ]; then
fmt=raw
fi
if [ -z "$driver" ]; then
driver="$def_disk_driver"
fi
if [ -z "$serial" ]; then
- serial="${file##*/}"
+ # use filename as serial if not provided a wwn
+ if [ -n "$wwn" ]; then
+ serial="$wwn"
+ else
+ serial="${file##*/}"
+ fi
fi
# make sure we add either bus= or index=
@@ -470,11 +499,21 @@ main() {
id=*|if=*|driver=*|$file|file=*) continue;;
fmt=*|format=*) continue;;
serial=*|bus=*|unit=*|index=*) continue;;
+ file.locking=*)
+ qemu_supports_file_locking || {
+ debug 2 "qemu has no file locking." \
+ "Dropping '$tok' from: $cur"
+ continue
+ };;
esac
isdevopt "$driver" "$tok" && devopts="${devopts},$tok" ||
diskopts="${diskopts},${tok}"
done
-
+ case $driver in
+ virtio-blk-ccw)
+ # disable scsi when using virtio-blk-ccw
+ devopts="${devopts},scsi=off";;
+ esac
diskargs=( "${diskargs[@]}" -drive "$diskopts" -device "$devopts" )
done
@@ -623,10 +662,16 @@ main() {
done
local bus_devices
- bus_devices=( -device "$virtio_scsi_bus,id=virtio-scsi-xkvm" )
- cmd=( "${kvmcmd[@]}" "${archopts[@]}"
+ if [ -n "${virtio_scsi_bus}" ]; then
+ bus_devices=( -device "$virtio_scsi_bus,id=virtio-scsi-xkvm" )
+ fi
+ local rng_devices
+ rng_devices=( -object "rng-random,filename=/dev/urandom,id=objrng0"
+ -device "$virtio_rng_device,rng=objrng0,id=rng0" )
+ cmd=( "${kvmcmd[@]}" "${archopts[@]}"
"${bios_opts[@]}"
"${bus_devices[@]}"
+ "${rng_devices[@]}"
"${netargs[@]}"
"${diskargs[@]}" "${pt[@]}" )
local pcmd=$(quote_cmd "${cmd[@]}")
@@ -661,4 +706,4 @@ else
main "$@"
fi
-# vi: ts=4 expandtab
+# vi: ts=4 expandtab syntax=sh