summaryrefslogtreecommitdiff
path: root/tools
diff options
context:
space:
mode:
Diffstat (limited to 'tools')
-rwxr-xr-xtools/build-on-freebsd6
-rwxr-xr-xtools/cloudconfig-schema35
-rwxr-xr-xtools/ds-identify64
-rwxr-xr-xtools/mock-meta.py4
-rwxr-xr-xtools/net-convert.py2
-rwxr-xr-xtools/read-dependencies244
-rwxr-xr-xtools/render-cloudcfg43
-rwxr-xr-xtools/run-centos271
8 files changed, 623 insertions, 46 deletions
diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd
index ccc10b40..ff9153ad 100755
--- a/tools/build-on-freebsd
+++ b/tools/build-on-freebsd
@@ -8,6 +8,7 @@ fail() { echo "FAILED:" "$@" 1>&2; exit 1; }
# Check dependencies:
depschecked=/tmp/c-i.dependencieschecked
pkgs="
+ bash
dmidecode
e2fsprogs
py27-Jinja2
@@ -16,7 +17,7 @@ pkgs="
py27-configobj
py27-jsonpatch
py27-jsonpointer
- py27-oauth
+ py27-oauthlib
py27-prettytable
py27-requests
py27-serial
@@ -35,9 +36,6 @@ touch $depschecked
python setup.py build
python setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd
-# Install the correct config file:
-cp config/cloud.cfg-freebsd /etc/cloud/cloud.cfg
-
# Enable cloud-init in /etc/rc.conf:
sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf
echo 'cloudinit_enable="YES"' >> /etc/rc.conf
diff --git a/tools/cloudconfig-schema b/tools/cloudconfig-schema
new file mode 100755
index 00000000..32f0d61e
--- /dev/null
+++ b/tools/cloudconfig-schema
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+# This file is part of cloud-init. See LICENSE file for license information.
+
+"""cloudconfig-schema
+
+Validate existing files against cloud-config schema or provide supported schema
+documentation.
+"""
+
+import os
+import sys
+
+
+def call_entry_point(name):
+ (istr, dot, ent) = name.rpartition('.')
+ try:
+ __import__(istr)
+ except ImportError:
+ # if that import failed, check dirname(__file__/..)
+ # to support ./bin/program with modules in .
+ _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ sys.path.insert(0, _tdir)
+ try:
+ __import__(istr)
+ except ImportError as e:
+ sys.stderr.write("Unable to find %s: %s\n" % (name, e))
+ sys.exit(2)
+
+ sys.exit(getattr(sys.modules[istr], ent)())
+
+
+if __name__ == '__main__':
+ call_entry_point("cloudinit.config.schema.main")
+
+# vi: ts=4 expandtab syntax=python
diff --git a/tools/ds-identify b/tools/ds-identify
index 74d26537..33bd2991 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -85,6 +85,7 @@ DI_MAIN=${DI_MAIN:-main}
DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}"
DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}"
+DI_DMI_CHASSIS_ASSET_TAG=""
DI_DMI_PRODUCT_NAME=""
DI_DMI_SYS_VENDOR=""
DI_DMI_PRODUCT_SERIAL=""
@@ -110,7 +111,8 @@ DI_DSNAME=""
# this has to match the builtin list in cloud-init, it is what will
# be searched if there is no setting found in config.
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \
-CloudSigma CloudStack DigitalOcean Ec2 GCE OpenNebula OpenStack OVF SmartOS"
+CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \
+OVF SmartOS Scaleway"
DI_DSLIST=""
DI_MODE=""
DI_ON_FOUND=""
@@ -258,6 +260,12 @@ read_kernel_cmdline() {
DI_KERNEL_CMDLINE="$cmdline"
}
+read_dmi_chassis_asset_tag() {
+ cached "${DI_DMI_CHASSIS_ASSET_TAG}" && return
+ get_dmi_field chassis_asset_tag
+ DI_DMI_CHASSIS_ASSET_TAG="$_RET"
+}
+
read_dmi_sys_vendor() {
cached "${DI_DMI_SYS_VENDOR}" && return
get_dmi_field sys_vendor
@@ -385,6 +393,14 @@ read_pid1_product_name() {
DI_PID_1_PRODUCT_NAME="$product_name"
}
+dmi_chassis_asset_tag_matches() {
+ is_container && return 1
+ case "${DI_DMI_CHASSIS_ASSET_TAG}" in
+ $1) return 0;;
+ esac
+ return 1
+}
+
dmi_product_name_matches() {
is_container && return 1
case "${DI_DMI_PRODUCT_NAME}" in
@@ -401,11 +417,6 @@ dmi_product_serial_matches() {
return 1
}
-dmi_product_name_is() {
- is_container && return 1
- [ "${DI_DMI_PRODUCT_NAME}" = "$1" ]
-}
-
dmi_sys_vendor_is() {
is_container && return 1
[ "${DI_DMI_SYS_VENDOR}" = "$1" ]
@@ -477,7 +488,7 @@ dscheck_CloudStack() {
dscheck_CloudSigma() {
# http://paste.ubuntu.com/23624795/
- dmi_product_name_is "CloudSigma" && return $DS_FOUND
+ dmi_product_name_matches "CloudSigma" && return $DS_FOUND
return $DS_NOT_FOUND
}
@@ -544,6 +555,9 @@ dscheck_NoCloud() {
case " ${DI_KERNEL_CMDLINE} " in
*\ ds=nocloud*) return ${DS_FOUND};;
esac
+ case " ${DI_DMI_PRODUCT_SERIAL} " in
+ *\ ds=nocloud*) return ${DS_FOUND};;
+ esac
for d in nocloud nocloud-net; do
check_seed_dir "$d" meta-data user-data && return ${DS_FOUND}
done
@@ -653,6 +667,8 @@ dscheck_Azure() {
# UUID="112D211272645f72" LABEL="rd_rdfe_stable.161212-1209"
# TYPE="udf">/dev/sr0</device>
#
+ local azure_chassis="7783-7084-3265-9085-8269-3286-77"
+ dmi_chassis_asset_tag_matches "${azure_chassis}" && return $DS_FOUND
check_seed_dir azure ovf-env.xml && return ${DS_FOUND}
[ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND}
@@ -785,7 +801,7 @@ dscheck_Ec2() {
}
dscheck_GCE() {
- if dmi_product_name_is "Google Compute Engine"; then
+ if dmi_product_name_matches "Google Compute Engine"; then
return ${DS_FOUND}
fi
# product name is not guaranteed (LP: #1674861)
@@ -806,10 +822,10 @@ dscheck_OpenStack() {
return ${DS_NOT_FOUND}
fi
local nova="OpenStack Nova" compute="OpenStack Compute"
- if dmi_product_name_is "$nova"; then
+ if dmi_product_name_matches "$nova"; then
return ${DS_FOUND}
fi
- if dmi_product_name_is "$compute"; then
+ if dmi_product_name_matches "$compute"; then
# RDO installed nova (LP: #1675349).
return ${DS_FOUND}
fi
@@ -821,10 +837,11 @@ dscheck_OpenStack() {
}
dscheck_AliYun() {
- # aliyun is not enabled by default (LP: #1638931)
- # so if we are here, it is because the datasource_list was
- # set to include it. Thus, 'maybe'.
- return $DS_MAYBE
+ check_seed_dir "AliYun" meta-data user-data && return ${DS_FOUND}
+ if dmi_product_name_matches "Alibaba Cloud ECS"; then
+ return $DS_FOUND
+ fi
+ return $DS_NOT_FOUND
}
dscheck_AltCloud() {
@@ -879,6 +896,22 @@ dscheck_None() {
return ${DS_NOT_FOUND}
}
+dscheck_Scaleway() {
+ if [ "${DI_DMI_SYS_VENDOR}" = "Scaleway" ]; then
+ return $DS_FOUND
+ fi
+
+ case " ${DI_KERNEL_CMDLINE} " in
+ *\ scaleway\ *) return ${DS_FOUND};;
+ esac
+
+    if [ -f "${PATH_ROOT}/var/run/scaleway" ]; then
+ return ${DS_FOUND}
+ fi
+
+ return ${DS_NOT_FOUND}
+}
+
collect_info() {
read_virt
read_pid1_product_name
@@ -887,6 +920,7 @@ collect_info() {
read_config
read_datasource_list
read_dmi_sys_vendor
+ read_dmi_chassis_asset_tag
read_dmi_product_name
read_dmi_product_serial
read_dmi_product_uuid
@@ -901,7 +935,7 @@ print_info() {
_print_info() {
local n="" v="" vars=""
vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL"
- vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME"
+ vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME DMI_CHASSIS_ASSET_TAG"
vars="$vars FS_LABELS KERNEL_CMDLINE VIRT"
vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION"
vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM"
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index f185dbf2..a5d14ab7 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -262,8 +262,8 @@ class MetaDataHandler(object):
except ValueError:
raise WebException(hclient.BAD_REQUEST,
"%s: not an integer" % mybe_key)
- except KeyError:
- raise WebException(hclient.BAD_REQUEST,
+ except IndexError:
+ raise WebException(hclient.NOT_FOUND,
"Unknown key id %r" % mybe_key)
# Extract the possible sub-params
result = traverse(nparams[1:], {
diff --git a/tools/net-convert.py b/tools/net-convert.py
index b2db8adf..68559cbf 100755
--- a/tools/net-convert.py
+++ b/tools/net-convert.py
@@ -75,7 +75,7 @@ def main():
r_cls = sysconfig.Renderer
r = r_cls()
- r.render_network_state(ns, target=args.directory)
+ r.render_network_state(network_state=ns, target=args.directory)
if __name__ == '__main__':
diff --git a/tools/read-dependencies b/tools/read-dependencies
index f4349055..2a648680 100755
--- a/tools/read-dependencies
+++ b/tools/read-dependencies
@@ -1,43 +1,239 @@
#!/usr/bin/env python
+"""List pip dependencies or system package dependencies for cloud-init."""
# You might be tempted to rewrite this as a shell script, but you
# would be surprised to discover that things like 'egrep' or 'sed' may
# differ between Linux and *BSD.
+try:
+ from argparse import ArgumentParser
+except ImportError:
+ raise RuntimeError(
+ 'Could not import python-argparse. Please install python-argparse '
+ 'package to continue')
+
+import json
import os
import re
-import sys
import subprocess
+import sys
+
+DEFAULT_REQUIREMENTS = 'requirements.txt'
+
+# Map the appropriate package dir needed for each distro choice
+DISTRO_PKG_TYPE_MAP = {
+ 'centos': 'redhat',
+ 'redhat': 'redhat',
+ 'debian': 'debian',
+ 'ubuntu': 'debian',
+ 'opensuse': 'suse',
+ 'suse': 'suse'
+}
+
+DISTRO_INSTALL_PKG_CMD = {
+ 'centos': ['yum', 'install', '--assumeyes'],
+ 'redhat': ['yum', 'install', '--assumeyes'],
+ 'debian': ['apt', 'install', '-y'],
+ 'ubuntu': ['apt', 'install', '-y'],
+ 'opensuse': ['zypper', 'install'],
+ 'suse': ['zypper', 'install']
+}
+
+
+# List of base system packages required to enable ci automation
+CI_SYSTEM_BASE_PKGS = {
+ 'common': ['make', 'sudo', 'tar'],
+ 'redhat': ['python-tox'],
+ 'centos': ['python-tox'],
+ 'ubuntu': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild'],
+ 'debian': ['devscripts', 'python3-dev', 'libssl-dev', 'tox', 'sbuild']}
+
+
+# JSON definition of distro-specific package dependencies
+DISTRO_PKG_DEPS_PATH = "packages/pkg-deps.json"
+
+
+def get_parser():
+ """Return an argument parser for this command."""
+ parser = ArgumentParser(description=__doc__)
+ parser.add_argument(
+ '-r', '--requirements-file', type=str, dest='req_files',
+ action='append', default=None,
+ help='pip-style requirements file [default=%s]' % DEFAULT_REQUIREMENTS)
+ parser.add_argument(
+ '-d', '--distro', type=str, choices=DISTRO_PKG_TYPE_MAP.keys(),
+ help='The name of the distro to generate package deps for.')
+ parser.add_argument(
+ '--dry-run', action='store_true', default=False, dest='dry_run',
+ help='Dry run the install, making no package changes.')
+ parser.add_argument(
+ '-s', '--system-pkg-names', action='store_true', default=False,
+ dest='system_pkg_names',
+        help='Generate distribution-specific system package names.')
+ parser.add_argument(
+ '-i', '--install', action='store_true', default=False,
+ dest='install',
+ help='When specified, install the required system packages.')
+ parser.add_argument(
+ '-t', '--test-distro', action='store_true', default=False,
+ dest='test_distro',
+ help='Additionally install continuous integration system packages '
+ 'required for build and test automation.')
+ parser.add_argument(
+ '-v', '--python-version', type=str, dest='python_version', default=None,
+ choices=["2", "3"],
+ help='Override the version of python we want to generate system '
+ 'package dependencies for. Defaults to the version of python '
+ 'this script is called with')
+ return parser
+
+
+def get_package_deps_from_json(topdir, distro):
+ """Get a dict of build and runtime package requirements for a distro.
+
+ @param topdir: The root directory in which to search for the
+ DISTRO_PKG_DEPS_PATH json blob of package requirements information.
+ @param distro: The specific distribution shortname to pull dependencies
+ for.
+    @return: Dict containing "requires", "build-requires" and "renames" lists
+ for a given distribution.
+ """
+ with open(os.path.join(topdir, DISTRO_PKG_DEPS_PATH), 'r') as stream:
+ deps = json.loads(stream.read())
+ if distro is None:
+ return {}
+ return deps[DISTRO_PKG_TYPE_MAP[distro]]
+
+
+def parse_pip_requirements(requirements_path):
+ """Return the pip requirement names from pip-style requirements_path."""
+ dep_names = []
+ with open(requirements_path, "r") as fp:
+ for line in fp:
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+
+ # remove pip-style markers
+ dep = line.split(';')[0]
+
+ # remove version requirements
+ if re.search('[>=.<]+', dep):
+ dep_names.append(re.split(r'[>=.<]+', dep)[0].strip())
+ else:
+ dep_names.append(dep)
+ return dep_names
+
+
+def translate_pip_to_system_pkg(pip_requires, renames, python_ver):
+ """Translate pip package names to distro-specific package names.
+
+ @param pip_requires: List of versionless pip package names to translate.
+    @param renames: Dict containing special case renames from pip name to system
+ package name for the distro.
+ @param python_ver: Optional python version string "2" or "3". When None,
+ use the python version that is calling this script via sys.version_info.
+ """
+ if python_ver is None:
+ python_ver = str(sys.version_info[0])
+ if python_ver == "2":
+ prefix = "python-"
+ else:
+ prefix = "python3-"
+ standard_pkg_name = "{0}{1}"
+ translated_names = []
+ for pip_name in pip_requires:
+ pip_name = pip_name.lower()
+ # Find a rename if present for the distro package and python version
+ rename = renames.get(pip_name, {}).get(python_ver, None)
+ if rename:
+ translated_names.append(rename)
+ else:
+ translated_names.append(
+ standard_pkg_name.format(prefix, pip_name))
+ return translated_names
+
+
+def main(distro):
+ parser = get_parser()
+ args = parser.parse_args()
+ if 'CLOUD_INIT_TOP_D' in os.environ:
+ topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
+ else:
+ topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
-if 'CLOUD_INIT_TOP_D' in os.environ:
- topd = os.path.realpath(os.environ.get('CLOUD_INIT_TOP_D'))
-else:
- topd = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+ if args.test_distro:
+ # Give us all the system deps we need for continuous integration
+ if args.req_files:
+ sys.stderr.write(
+ "Parameter --test-distro overrides --requirements-file. Use "
+ "one or the other.\n")
+ sys.exit(1)
+ args.req_files = [os.path.join(topd, DEFAULT_REQUIREMENTS),
+ os.path.join(topd, 'test-' + DEFAULT_REQUIREMENTS)]
+ args.install = True
+ if args.req_files is None:
+ args.req_files = [os.path.join(topd, DEFAULT_REQUIREMENTS)]
+ if not os.path.isfile(args.req_files[0]):
+ sys.stderr.write("Unable to locate '%s' file that should "
+ "exist in cloud-init root directory." %
+ args.req_files[0])
+ sys.exit(1)
-for fname in ("setup.py", "requirements.txt"):
- if not os.path.isfile(os.path.join(topd, fname)):
- sys.stderr.write("Unable to locate '%s' file that should "
- "exist in cloud-init root directory." % fname)
+ bad_files = [r for r in args.req_files if not os.path.isfile(r)]
+ if bad_files:
+ sys.stderr.write(
+ "Unable to find requirements files: %s\n" % ','.join(bad_files))
sys.exit(1)
-if len(sys.argv) > 1:
- reqfile = sys.argv[1]
-else:
- reqfile = "requirements.txt"
+ pip_pkg_names = set()
+ for req_path in args.req_files:
+ pip_pkg_names.update(set(parse_pip_requirements(req_path)))
+ deps_from_json = get_package_deps_from_json(topd, args.distro)
+ renames = deps_from_json.get('renames', {})
+ translated_pip_names = translate_pip_to_system_pkg(
+ pip_pkg_names, renames, args.python_version)
+ all_deps = []
+ if args.distro:
+ all_deps.extend(
+ translated_pip_names + deps_from_json['requires'] +
+ deps_from_json['build-requires'])
+ else:
+ if args.system_pkg_names:
+ all_deps = translated_pip_names
+ else:
+            all_deps = list(pip_pkg_names)
+ if args.install:
+ pkg_install(all_deps, args.distro, args.test_distro, args.dry_run)
+ else:
+ print('\n'.join(all_deps))
-with open(os.path.join(topd, reqfile), "r") as fp:
- for line in fp:
- line = line.strip()
- if not line or line.startswith("#"):
- continue
- # remove pip-style markers
- dep = line.split(';')[0]
+def pkg_install(pkg_list, distro, test_distro=False, dry_run=False):
+ """Install a list of packages using the DISTRO_INSTALL_PKG_CMD."""
+ if test_distro:
+ pkg_list = list(pkg_list) + CI_SYSTEM_BASE_PKGS['common']
+ distro_base_pkgs = CI_SYSTEM_BASE_PKGS.get(distro, [])
+ pkg_list += distro_base_pkgs
+ print('Installing deps: {0}{1}'.format(
+ '(dryrun)' if dry_run else '', ' '.join(pkg_list)))
+ install_cmd = []
+ if dry_run:
+ install_cmd.append('echo')
+ if os.geteuid() != 0:
+ install_cmd.append('sudo')
+ install_cmd.extend(DISTRO_INSTALL_PKG_CMD[distro])
+ if distro in ['centos', 'redhat']:
+ # CentOS and Redhat need epel-release to access oauthlib and jsonschema
+ subprocess.check_call(install_cmd + ['epel-release'])
+ if distro in ['suse', 'opensuse', 'redhat', 'centos']:
+ pkg_list.append('rpm-build')
+ subprocess.check_call(install_cmd + pkg_list)
- # remove version requirements
- dep = re.split("[>=.<]*", dep)[0].strip()
- print(dep)
-sys.exit(0)
+if __name__ == "__main__":
+ parser = get_parser()
+ args = parser.parse_args()
+ sys.exit(main(args.distro))
# vi: ts=4 expandtab
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
new file mode 100755
index 00000000..e624541a
--- /dev/null
+++ b/tools/render-cloudcfg
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+
+import argparse
+import os
+import sys
+
+if "avoid-pep8-E402-import-not-top-of-file":
+ _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+ sys.path.insert(0, _tdir)
+ from cloudinit import templater
+ from cloudinit import util
+ from cloudinit.atomic_helper import write_file
+
+
+def main():
+ parser = argparse.ArgumentParser()
+ variants = ["bsd", "centos", "fedora", "rhel", "ubuntu", "unknown"]
+ platform = util.system_info()
+ parser.add_argument(
+ "--variant", default=platform['variant'], action="store",
+ help="define the variant.", choices=variants)
+ parser.add_argument(
+ "template", nargs="?", action="store",
+ default='./config/cloud.cfg.tmpl',
+ help="Path to the cloud.cfg template")
+ parser.add_argument(
+ "output", nargs="?", action="store", default="-",
+ help="Output file. Use '-' to write to stdout")
+
+ args = parser.parse_args()
+
+ with open(args.template, 'r') as fh:
+ contents = fh.read()
+ tpl_params = {'variant': args.variant}
+ contents = (templater.render_string(contents, tpl_params)).rstrip() + "\n"
+ util.load_yaml(contents)
+ if args.output == "-":
+ sys.stdout.write(contents)
+ else:
+ write_file(args.output, contents, omode="w")
+
+if __name__ == '__main__':
+ main()
diff --git a/tools/run-centos b/tools/run-centos
new file mode 100755
index 00000000..d44d5145
--- /dev/null
+++ b/tools/run-centos
@@ -0,0 +1,271 @@
+#!/bin/bash
+# This file is part of cloud-init. See LICENSE file for license information.
+
+set -u
+
+VERBOSITY=0
+TEMP_D=""
+KEEP=false
+CONTAINER=""
+
+error() { echo "$@" 1>&2; }
+fail() { [ $# -eq 0 ] || error "$@"; exit 1; }
+errorrc() { local r=$?; error "$@" "ret=$r"; return $r; }
+
+Usage() {
+ cat <<EOF
+Usage: ${0##*/} [ options ] version
+
+   This utility makes it easier to run tests, build rpm and source rpm
+ generation inside a LXC of the specified version of CentOS.
+
+ version is major release number (6 or 7)
+
+ options:
+ -a | --artifact keep .rpm artifacts
+ -k | --keep keep container after tests
+ -r | --rpm build .rpm
+ -s | --srpm build .src.rpm
+ -u | --unittest run unit tests
+
+ Example:
+ * ${0##*/} --rpm --srpm --unittest 6
+EOF
+}
+
+bad_Usage() { Usage 1>&2; [ $# -eq 0 ] || error "$@"; return 1; }
+cleanup() {
+ if [ -n "$CONTAINER" -a "$KEEP" = "false" ]; then
+ delete_container "$CONTAINER"
+ fi
+ [ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
+}
+
+debug() {
+ local level=${1}; shift;
+ [ "${level}" -gt "${VERBOSITY}" ] && return
+ error "${@}"
+}
+
+
+inside_as() {
+ # inside_as(container_name, user, cmd[, args])
+ # executes cmd with args inside container as user in users home dir.
+ local name="$1" user="$2"
+ shift 2
+ if [ "$user" = "root" ]; then
+ inside "$name" "$@"
+ return
+ fi
+ local stuffed="" b64=""
+ stuffed=$(getopt --shell sh --options "" -- -- "$@")
+ stuffed=${stuffed# -- }
+ b64=$(printf "%s\n" "$stuffed" | base64 --wrap=0)
+ inside "$name" su "$user" -c \
+ 'cd; eval set -- "$(echo '$b64' | base64 --decode)" && exec "$@"'
+}
+
+inside_as_cd() {
+ local name="$1" user="$2" dir="$3"
+ shift 3
+ inside_as "$name" "$user" sh -c 'cd "$0" && exec "$@"' "$dir" "$@"
+}
+
+inside() {
+ local name="$1"
+ shift
+ lxc exec "$name" -- "$@"
+}
+
+inject_cloud_init(){
+ # take current cloud-init git dir and put it inside $name at
+ # ~$user/cloud-init.
+ local name="$1" user="$2" top_d="" dname="" pstat=""
+ top_d=$(git rev-parse --show-toplevel) || {
+ errorrc "Failed to get git top level in $PWD";
+ return
+ }
+ dname=$(basename "${top_d}") || return
+ debug 1 "collecting ${top_d} ($dname) into user $user in $name."
+ tar -C "${top_d}/.." -cpf - "$dname" |
+ inside_as "$name" "$user" sh -ec '
+ dname=$1
+ rm -Rf "$dname"
+ tar -xpf -
+ [ "$dname" = "cloud-init" ] || mv "$dname" cloud-init' \
+ extract "$dname"
+ [ "${PIPESTATUS[*]}" = "0 0" ] || {
+ error "Failed to push tarball of '$top_d' into $name" \
+ " for user $user (dname=$dname)"
+ return 1
+ }
+ return 0
+}
+
+prep() {
+ # we need some very basic things not present in the container.
+ # - git
+ # - tar (CentOS 6 lxc container does not have it)
+ # - python-argparse (or python3)
+    local needed="" pair="" pkg="" cmd=""
+ for pair in tar:tar git:git; do
+ pkg=${pair#*:}
+ cmd=${pair%%:*}
+ command -v $cmd >/dev/null 2>&1 || needed="${needed} $pkg"
+ done
+    if ! command -v python3 >/dev/null 2>&1; then
+ python -c "import argparse" >/dev/null 2>&1 ||
+ needed="${needed} python-argparse"
+ fi
+ needed=${needed# }
+ if [ -z "$needed" ]; then
+ error "No prep packages needed"
+ return 0
+ fi
+ error "Installing prep packages: ${needed}"
+ yum install --assumeyes ${needed}
+}
+
+start_container() {
+ local src="$1" name="$2"
+ debug 1 "starting container $name from '$src'"
+ lxc launch "$src" "$name" || {
+ errorrc "Failed to start container '$name' from '$src'";
+ return
+ }
+ CONTAINER=$name
+
+ local out="" ret=""
+ debug 1 "waiting for networking"
+ out=$(inside "$name" sh -c '
+ i=0
+ while [ $i -lt 60 ]; do
+ getent hosts mirrorlist.centos.org && exit 0
+            i=$(($i+1)); sleep 2
+        done; exit 1' 2>&1)
+ ret=$?
+ if [ $ret -ne 0 ]; then
+ error "Waiting for network in container '$name' failed. [$ret]"
+ error "$out"
+ return $ret
+ fi
+
+ if [ ! -z "${http_proxy-}" ]; then
+ debug 1 "configuring proxy ${http_proxy}"
+ inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
+ fi
+}
+
+delete_container() {
+ debug 1 "removing container $1 [--keep to keep]"
+ lxc delete --force "$1"
+}
+
+main() {
+ local short_opts="ahkrsuv"
+ local long_opts="artifact,help,keep,rpm,srpm,unittest,verbose"
+ local getopt_out=""
+ getopt_out=$(getopt --name "${0##*/}" \
+ --options "${short_opts}" --long "${long_opts}" -- "$@") &&
+ eval set -- "${getopt_out}" ||
+ { bad_Usage; return; }
+
+ local cur="" next=""
+ local artifact="" keep="" rpm="" srpm="" unittest="" version=""
+
+ while [ $# -ne 0 ]; do
+ cur="${1:-}"; next="${2:-}";
+ case "$cur" in
+ -a|--artifact) artifact=1;;
+ -h|--help) Usage ; exit 0;;
+ -k|--keep) KEEP=true;;
+ -r|--rpm) rpm=1;;
+ -s|--srpm) srpm=1;;
+ -u|--unittest) unittest=1;;
+ -v|--verbose) VERBOSITY=$((${VERBOSITY}+1));;
+ --) shift; break;;
+ esac
+ shift;
+ done
+
+ [ $# -eq 1 ] || { bad_Usage "ERROR: Must provide version!"; return; }
+ version="$1"
+ case "$version" in
+ 6|7) :;;
+ *) error "Expected version of 6 or 7, not '$version'"; return;;
+ esac
+
+ TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX") ||
+ fail "failed to make tempdir"
+ trap cleanup EXIT
+
+ # program starts here
+ local uuid="" name="" user="ci-test" cdir=""
+ cdir="/home/$user/cloud-init"
+ uuid=$(uuidgen -t) || { error "no uuidgen"; return 1; }
+ name="cloud-init-centos-${uuid%%-*}"
+
+ start_container "images:centos/$version" "$name"
+
+ # prep the container (install very basic dependencies)
+ inside "$name" bash -s prep <"$0" ||
+ { errorrc "Failed to prep container $name"; return; }
+
+ # add the user
+ inside "$name" useradd "$user"
+
+ debug 1 "inserting cloud-init"
+ inject_cloud_init "$name" "$user" || {
+ errorrc "FAIL: injecting cloud-init into $name failed."
+ return
+ }
+
+ inside_as_cd "$name" root "$cdir" \
+ ./tools/read-dependencies --distro=centos --test-distro || {
+ errorrc "FAIL: failed to install dependencies with read-dependencies"
+ return
+ }
+
+ local errors=0
+ inside_as_cd "$name" "$user" "$cdir" \
+ sh -ec "git checkout .; git status" ||
+ { errorrc "git checkout failed."; errors=$(($errors+1)); }
+
+ if [ -n "$unittest" ]; then
+ debug 1 "running unit tests."
+ inside_as_cd "$name" "$user" "$cdir" nosetests tests/unittests ||
+ { errorrc "nosetests failed."; errors=$(($errors+1)); }
+ fi
+
+ if [ -n "$srpm" ]; then
+ debug 1 "building srpm."
+ inside_as_cd "$name" "$user" "$cdir" ./packages/brpm --srpm ||
+ { errorrc "brpm --srpm."; errors=$(($errors+1)); }
+ fi
+
+ if [ -n "$rpm" ]; then
+ debug 1 "building rpm."
+ inside_as_cd "$name" "$user" "$cdir" ./packages/brpm ||
+ { errorrc "brpm failed."; errors=$(($errors+1)); }
+ fi
+
+ if [ -n "$artifact" ]; then
+ for built_rpm in $(inside "$name" sh -c "echo $cdir/*.rpm"); do
+ lxc file pull "$name/$built_rpm" .
+ done
+ fi
+
+ if [ "$errors" != "0" ]; then
+ error "there were $errors errors."
+ return 1
+ fi
+ return 0
+}
+
+if [ "${1:-}" = "prep" ]; then
+ shift
+ prep "$@"
+else
+ main "$@"
+fi
+# vi: ts=4 expandtab