From f55bb17ddb2fd64e039057bf7ee50951a0dc93e8 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 20 Dec 2018 17:22:45 +0000
Subject: Vmware: Add support for the com.vmware.guestInfo OVF transport.

This adds support for reading OVF information over the
'com.vmware.guestInfo' transport. The current implementation requires
that vmware-rpctool be installed in the system.

LP: #1807466
---
 tools/ds-identify | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

(limited to 'tools')

diff --git a/tools/ds-identify b/tools/ds-identify
index 1acfeeb9..c61f18ae 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -726,6 +726,25 @@ ovf_vmware_guest_customization() {
     return 1
 }
 
+ovf_vmware_transport_guestinfo() {
+    [ "${DI_VIRT}" = "vmware" ] || return 1
+    command -v vmware-rpctool >/dev/null 2>&1 || return 1
+    local out="" ret=""
+    out=$(vmware-rpctool "info-get guestinfo.ovfEnv" 2>&1)
+    ret=$?
+    if [ $ret -ne 0 ]; then
+        debug 1 "Running on vmware but rpctool query returned $ret: $out"
+        return 1
+    fi
+    case "$1" in
+        "<?xml"*|"<?XML"*) :;;
+        *) debug 1 "guestinfo.ovfEnv had non-xml content: $out";
+            return 1;;
+    esac
+    debug 1 "Found guestinfo transport."
+    return 0
+}
+
 is_cdrom_ovf() {
     local dev="$1" label="$2"
     # skip devices that don't look like cdrom paths.
@@ -752,6 +771,8 @@ dscheck_OVF() {
     # Azure provides ovf. Skip false positive by dis-allowing.
     is_azure_chassis && return $DS_NOT_FOUND
 
+    ovf_vmware_transport_guestinfo && return "${DS_FOUND}"
+
     # DI_ISO9660_DEVS is <device>=label,<device>=label2
     # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces
     if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then
-- 
cgit v1.2.3


From 12bc76cebf69a1c8cf9eba78431333842ed170cf Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Thu, 20 Dec 2018 23:51:09 +0000
Subject: ds-identify: fix wrong variable name in ovf_vmware_transport_guestinfo.

ovf_vmware_transport_guestinfo is not currently tested. It used '$1'
instead of '$out' when checking for xml content in the output of
vmware-rpctool.
---
 tools/ds-identify | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'tools')

diff --git a/tools/ds-identify b/tools/ds-identify
index c61f18ae..b78b2731 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -736,7 +736,7 @@ ovf_vmware_transport_guestinfo() {
         debug 1 "Running on vmware but rpctool query returned $ret: $out"
         return 1
     fi
-    case "$1" in
+    case "$out" in
         "<?xml"*|"<?XML"*) :;;
         *) debug 1 "guestinfo.ovfEnv had non-xml content: $out";
             return 1;;
-- 
cgit v1.2.3


From: Paride Legovini
Date: Fri, 25 Jan 2019 17:52:41 +0000
Subject: run-container: uncomment baseurl in yum.repos.d/*.repo when using a
 proxy

When using a proxy it is often useful to know in advance which mirrors
are to be contacted, so a whitelist can be set up. This is not easy when
using the yum.conf(5) mirrorlist option, as the retrieved list of
mirrors may change.

The repository definition may also specify a canonical mirror with the
'baseurl' option; this option is often commented out by default to favor
the usage of worldwide mirrors. This patch uncomments 'baseurl' when an
http_proxy is being used, so the canonical mirror is used *in addition
to* the mirrors retrieved from the mirrorlist.
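For illustration, a minimal sketch of the substitution's effect on a hypothetical repo stanza (the file path and mirror URLs are invented; only the sed expression comes from the patch):

    # A stanza shipped with the canonical mirror commented out:
    cat > /tmp/example.repo <<'EOF'
    [base]
    name=CentOS-$releasever - Base
    mirrorlist=http://mirrorlist.centos.org/?release=$releasever&arch=$basearch&repo=os
    #baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/
    EOF
    # Drop the leading '#' from commented-out baseurl lines:
    sed -i '/^#baseurl=/s/#//' /tmp/example.repo
    grep '^baseurl=' /tmp/example.repo
    # baseurl=http://mirror.centos.org/centos/$releasever/os/$basearch/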
--- tools/run-container | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/run-container b/tools/run-container index 6dedb757..852f4d1e 100755 --- a/tools/run-container +++ b/tools/run-container @@ -373,6 +373,7 @@ wait_for_boot() { inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf" inside "$name" sed -i s/enabled=1/enabled=0/ \ /etc/yum/pluginconf.d/fastestmirror.conf + inside "$name" sh -c "sed -i '/^#baseurl=/s/#//' /etc/yum.repos.d/*.repo" else debug 1 "do not know how to configure proxy on $OS_NAME" fi -- cgit v1.2.3 From 9cf9d8cdd3a8fd7d4d425f7051122d0ac8af2bbd Mon Sep 17 00:00:00 2001 From: Vitaly Kuznetsov Date: Mon, 18 Feb 2019 22:55:49 +0000 Subject: This is to fix https://bugs.launchpad.net/cloud-init/+bug/1812676 --- tools/cloud-init-per | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/cloud-init-per b/tools/cloud-init-per index 7d6754b6..eae3e93f 100755 --- a/tools/cloud-init-per +++ b/tools/cloud-init-per @@ -38,7 +38,7 @@ fi [ "$1" = "-h" -o "$1" = "--help" ] && { Usage ; exit 0; } [ $# -ge 3 ] || { Usage 1>&2; exit 1; } freq=$1 -name=$2 +name=${2/-/_} shift 2; [ "${name#*/}" = "${name}" ] || fail "name cannot contain a /" @@ -53,6 +53,12 @@ esac [ -d "${sem%/*}" ] || mkdir -p "${sem%/*}" || fail "failed to make directory for ${sem}" +# Rename legacy sem files with dashes in their names. Do not overwrite existing +# sem files to prevent clobbering those which may have been created from calls +# outside of cloud-init. +sem_legacy="${sem/_/-}" +[ "$sem" != "$sem_legacy" -a -e "$sem_legacy" ] && mv -n "$sem_legacy" "$sem" + [ "$freq" != "always" -a -e "$sem" ] && exit 0 "$@" ret=$? -- cgit v1.2.3 From 3554ffe8657738795ae5e1b89f22b39358d78821 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Fri, 8 Mar 2019 22:37:05 +0000 Subject: cloud-init-per: POSIX sh does not support string subst, use sed cloud-init-per is run via /bin/sh which requires POSIX shell compliance and does not implement string substitution like bash. Replace these calls with use of sed. LP: #1819222 --- tools/cloud-init-per | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/cloud-init-per b/tools/cloud-init-per index eae3e93f..fcd1ea79 100755 --- a/tools/cloud-init-per +++ b/tools/cloud-init-per @@ -38,7 +38,7 @@ fi [ "$1" = "-h" -o "$1" = "--help" ] && { Usage ; exit 0; } [ $# -ge 3 ] || { Usage 1>&2; exit 1; } freq=$1 -name=${2/-/_} +name=$(echo $2 | sed 's/-/_/g') shift 2; [ "${name#*/}" = "${name}" ] || fail "name cannot contain a /" @@ -56,7 +56,7 @@ esac # Rename legacy sem files with dashes in their names. Do not overwrite existing # sem files to prevent clobbering those which may have been created from calls # outside of cloud-init. -sem_legacy="${sem/_/-}" +sem_legacy=$(echo $sem | sed 's/_/-/g') [ "$sem" != "$sem_legacy" -a -e "$sem_legacy" ] && mv -n "$sem_legacy" "$sem" [ "$freq" != "always" -a -e "$sem" ] && exit 0 -- cgit v1.2.3 From b76714c355a87416f9f07156b0f025aceaca7296 Mon Sep 17 00:00:00 2001 From: Risto Oikarinen Date: Tue, 9 Apr 2019 18:05:24 +0000 Subject: Change DataSourceNoCloud to ignore file system label's case. NoCloud data source now accepts both 'cidata' and 'CIDATA' as filesystem labels. This is similar to DataSourceConfigDrive's support for 'config-2' and 'CONFIG-2'. 
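For illustration, a minimal sketch of why both spellings must be probed (device name invented): blkid label matching, which backs util.find_devs_with(), is case-sensitive, so a lowercase-only query never sees an uppercase label.

    # A seed disk created with 'mkfs.vfat -n CIDATA' carries an uppercase label:
    blkid -t LABEL=cidata -o device   # prints nothing for that disk
    blkid -t LABEL=CIDATA -o device   # /dev/vdb
    # The patched datasource therefore queries both LABEL=cidata and LABEL=CIDATA.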
--- cloudinit/sources/DataSourceNoCloud.py | 4 ++- doc/rtd/topics/datasources/nocloud.rst | 2 +- tests/unittests/test_datasource/test_nocloud.py | 42 +++++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 17 ++++++++++ tools/ds-identify | 7 +++-- 5 files changed, 67 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 6860f0cc..fcf5d589 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -106,7 +106,9 @@ class DataSourceNoCloud(sources.DataSource): fslist = util.find_devs_with("TYPE=vfat") fslist.extend(util.find_devs_with("TYPE=iso9660")) - label_list = util.find_devs_with("LABEL=%s" % label) + label_list = util.find_devs_with("LABEL=%s" % label.upper()) + label_list.extend(util.find_devs_with("LABEL=%s" % label.lower())) + devlist = list(set(fslist) & set(label_list)) devlist.sort(reverse=True) diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst index 08578e86..1c5cf961 100644 --- a/doc/rtd/topics/datasources/nocloud.rst +++ b/doc/rtd/topics/datasources/nocloud.rst @@ -9,7 +9,7 @@ network at all). You can provide meta-data and user-data to a local vm boot via files on a `vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be -``cidata``. +``cidata`` or ``CIDATA``. Alternatively, you can provide meta-data via kernel command line or SMBIOS "serial number" option. The data must be passed in the form of a string: diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py index 3429272c..b785362f 100644 --- a/tests/unittests/test_datasource/test_nocloud.py +++ b/tests/unittests/test_datasource/test_nocloud.py @@ -32,6 +32,36 @@ class TestNoCloudDataSource(CiTestCase): self.mocks.enter_context( mock.patch.object(util, 'read_dmi_data', return_value=None)) + def _test_fs_config_is_read(self, fs_label, fs_label_to_search): + vfat_device = 'device-1' + + def m_mount_cb(device, callback, mtype): + if (device == vfat_device): + return {'meta-data': yaml.dump({'instance-id': 'IID'})} + else: + return {} + + def m_find_devs_with(query='', path=''): + if 'TYPE=vfat' == query: + return [vfat_device] + elif 'LABEL={}'.format(fs_label) == query: + return [vfat_device] + else: + return [] + + self.mocks.enter_context( + mock.patch.object(util, 'find_devs_with', + side_effect=m_find_devs_with)) + self.mocks.enter_context( + mock.patch.object(util, 'mount_cb', + side_effect=m_mount_cb)) + sys_cfg = {'datasource': {'NoCloud': {'fs_label': fs_label_to_search}}} + dsrc = dsNoCloud(sys_cfg=sys_cfg, distro=None, paths=self.paths) + ret = dsrc.get_data() + + self.assertEqual(dsrc.metadata.get('instance-id'), 'IID') + self.assertTrue(ret) + def test_nocloud_seed_dir_on_lxd(self, m_is_lxd): md = {'instance-id': 'IID', 'dsmode': 'local'} ud = b"USER_DATA_HERE" @@ -90,6 +120,18 @@ class TestNoCloudDataSource(CiTestCase): ret = dsrc.get_data() self.assertFalse(ret) + def test_fs_config_lowercase_label(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'cidata') + + def test_fs_config_uppercase_label(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'cidata') + + def test_fs_config_lowercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('cidata', 'CIDATA') + + def test_fs_config_uppercase_label_search_uppercase(self, m_is_lxd): + self._test_fs_config_is_read('CIDATA', 'CIDATA') + def test_no_datasource_expected(self, 
m_is_lxd): # no source should be found if no cmdline, config, and fs_label=None sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}} diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index d00c1b4b..8c18aa1a 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -520,6 +520,10 @@ class TestDsIdentify(DsIdentifyBase): """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" self._test_ds_found('NoCloud') + def test_nocloud_upper(self): + """NoCloud is found with uppercase filesystem label.""" + self._test_ds_found('NoCloudUpper') + def test_nocloud_seed(self): """Nocloud seed directory.""" self._test_ds_found('NoCloud-seed') @@ -713,6 +717,19 @@ VALID_CFG = { 'dev/vdb': 'pretend iso content for cidata\n', } }, + 'NoCloudUpper': { + 'ds': 'NoCloud', + 'mocks': [ + MOCK_VIRT_IS_KVM, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + BLKID_UEFI_UBUNTU + + [{'DEVNAME': 'vdb', 'TYPE': 'iso9660', 'LABEL': 'CIDATA'}])}, + ], + 'files': { + 'dev/vdb': 'pretend iso content for cidata\n', + } + }, 'NoCloud-seed': { 'ds': 'NoCloud', 'files': { diff --git a/tools/ds-identify b/tools/ds-identify index b78b2731..6518901e 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -620,7 +620,7 @@ dscheck_MAAS() { } dscheck_NoCloud() { - local fslabel="cidata" d="" + local fslabel="cidata CIDATA" d="" case " ${DI_KERNEL_CMDLINE} " in *\ ds=nocloud*) return ${DS_FOUND};; esac @@ -632,9 +632,10 @@ dscheck_NoCloud() { check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} check_writable_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done - if has_fs_with_label "${fslabel}"; then + if has_fs_with_label $fslabel; then return ${DS_FOUND} fi + return ${DS_NOT_FOUND} } @@ -762,7 +763,7 @@ is_cdrom_ovf() { # explicitly skip known labels of other types. rd_rdfe is azure. case "$label" in - config-2|CONFIG-2|rd_rdfe_stable*|cidata) return 1;; + config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;; esac local idstr="http://schemas.dmtf.org/ovf/environment/1" -- cgit v1.2.3 From 3fb55ea85139f2d29ce32f124d099419fbd06f60 Mon Sep 17 00:00:00 2001 From: Chad Miller Date: Tue, 23 Apr 2019 17:07:39 +0000 Subject: tools/read-version: handle errors When the cloned branch was not the canonical upstream and tags were not available, tox would fail because tools/read-version would fail, and tragically never print the advice that is in tools/read-version about how to fix it. This changes tools/read-version to catch the exception that is elsewhere explicitly thrown and treat that too as an error it can handle. 
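For illustration, a minimal sketch of the failure mode being handled (the clone URL and paths are illustrative): in a checkout with no matching tags, git describe exits non-zero, and tiny_p() raises that as the RuntimeError now caught below.

    git clone --depth 1 https://github.com/canonical/cloud-init /tmp/ci
    cd /tmp/ci
    git describe --abbrev=8 --match='[0-9]*' --tags
    # fatal: No names found, cannot describe anything.
    echo $?   # non-zero; previously this traceback ended tox before the advice printed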
--- tools/read-version | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/read-version b/tools/read-version index e69c2ce0..6dca659e 100755 --- a/tools/read-version +++ b/tools/read-version @@ -71,9 +71,12 @@ if is_gitdir(_tdir) and which("git"): flags = ['--tags'] cmd = ['git', 'describe', '--abbrev=8', '--match=[0-9]*'] + flags - version = tiny_p(cmd).strip() + try: + version = tiny_p(cmd).strip() + except RuntimeError: + version = None - if not version.startswith(src_version): + if version is None or not version.startswith(src_version): sys.stderr.write("git describe version (%s) differs from " "cloudinit.version (%s)\n" % (version, src_version)) sys.stderr.write( -- cgit v1.2.3 From 7193b80e4ade638880bd66b1f208c049ffa24479 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Thu, 9 May 2019 18:35:17 +0000 Subject: freebsd: add chpasswd pkg in the image cc_set_passwords.py depends on chpasswd binary. --- tools/build-on-freebsd | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'tools') diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd index d23fde2b..dc3b9747 100755 --- a/tools/build-on-freebsd +++ b/tools/build-on-freebsd @@ -9,6 +9,7 @@ fail() { echo "FAILED:" "$@" 1>&2; exit 1; } depschecked=/tmp/c-i.dependencieschecked pkgs=" bash + chpasswd dmidecode e2fsprogs py27-Jinja2 @@ -17,6 +18,7 @@ pkgs=" py27-configobj py27-jsonpatch py27-jsonpointer + py27-jsonschema py27-oauthlib py27-requests py27-serial @@ -28,12 +30,9 @@ pkgs=" [ -f "$depschecked" ] || pkg install ${pkgs} || fail "install packages" touch $depschecked -# Required but unavailable port/pkg: py27-jsonpatch py27-jsonpointer -# Luckily, the install step will take care of this by installing it from pypi... - # Build the code and install in /usr/local/: -python setup.py build -python setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd +python2.7 setup.py build +python2.7 setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd # Enable cloud-init in /etc/rc.conf: sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf -- cgit v1.2.3 From c951963ffa94145be1bd5f1cef7dba6007f75bd2 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Tue, 28 May 2019 16:23:30 +0000 Subject: freebsd: fix the name of cloudcfg VARIANT config/cloud.cfg.tmpl uses 'freebsd', not 'bsd' to identify FreeBSD. --- tools/render-cloudcfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index 8b7cb875..0957c324 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -4,7 +4,7 @@ import argparse import os import sys -VARIANTS = ["bsd", "centos", "fedora", "rhel", "suse", "ubuntu", "unknown"] +VARIANTS = ["freebsd", "centos", "fedora", "rhel", "suse", "ubuntu", "unknown"] if "avoid-pep8-E402-import-not-top-of-file": _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -- cgit v1.2.3 From 19ddb1fb11434f860daee2238cdc23a56b9dc86f Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Thu, 30 May 2019 16:39:17 +0000 Subject: run-container: centos: comment out the repo mirrorlist In this way only the 'baseurl' mirror is used, which is easier to allow through firewalls and proxies. 
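For illustration, a minimal sketch of the combined edit on a hypothetical stanza (contents invented; the sed expression is the one from the patch):

    printf '%s\n' '[base]' \
        'mirrorlist=http://mirrorlist.centos.org/?repo=os' \
        '#baseurl=http://mirror.centos.org/centos/7/os/x86_64/' > /tmp/example.repo
    sed -i '/^#baseurl=/s/#// ; s/^mirrorlist/#mirrorlist/' /tmp/example.repo
    cat /tmp/example.repo
    # [base]
    # #mirrorlist=http://mirrorlist.centos.org/?repo=os
    # baseurl=http://mirror.centos.org/centos/7/os/x86_64/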
--- tools/run-container | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/run-container b/tools/run-container index 852f4d1e..1d24e15b 100755 --- a/tools/run-container +++ b/tools/run-container @@ -373,7 +373,7 @@ wait_for_boot() { inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf" inside "$name" sed -i s/enabled=1/enabled=0/ \ /etc/yum/pluginconf.d/fastestmirror.conf - inside "$name" sh -c "sed -i '/^#baseurl=/s/#//' /etc/yum.repos.d/*.repo" + inside "$name" sh -c "sed -i '/^#baseurl=/s/#// ; s/^mirrorlist/#mirrorlist/' /etc/yum.repos.d/*.repo" else debug 1 "do not know how to configure proxy on $OS_NAME" fi -- cgit v1.2.3 From deaeb714a3582ff7f31e411bcdaf9669903e35f0 Mon Sep 17 00:00:00 2001 From: "Mark T. Voelker" Date: Mon, 3 Jun 2019 15:37:42 +0000 Subject: Allow identification of OpenStack by Asset Tag When OpenStack is deployed on some hypervisors (such as VMware vSphere), cloud-init doesn't detect that it needs to probe the metadata service because the DMI product name field can't be set to a field that is recognized by cloud-init. However, the asset tag field can be set via flavor extra specs or image metadata. A similar approach is already used to identify Open Telekom Cloud. This patch allows cloud init to recognize "OpenStack Nova" or "OpenStack Compute" in the asset tag field as an indication that the instance being configured is running on an OpenStack platform. LP: #1669875 --- tests/unittests/test_ds_identify.py | 20 ++++++++++++++++++++ tools/ds-identify | 8 ++++++++ 2 files changed, 28 insertions(+) (limited to 'tools') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 8c18aa1a..7575223f 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -435,6 +435,14 @@ class TestDsIdentify(DsIdentifyBase): """Open Telecom identification.""" self._test_ds_found('OpenStack-OpenTelekom') + def test_openstack_asset_tag_nova(self): + """OpenStack identification via asset tag OpenStack Nova.""" + self._test_ds_found('OpenStack-AssetTag-Nova') + + def test_openstack_asset_tag_copute(self): + """OpenStack identification via asset tag OpenStack Compute.""" + self._test_ds_found('OpenStack-AssetTag-Compute') + def test_openstack_on_non_intel_is_maybe(self): """On non-Intel, openstack without dmi info is maybe. @@ -759,6 +767,18 @@ VALID_CFG = { 'files': {P_CHASSIS_ASSET_TAG: 'OpenTelekomCloud\n'}, 'mocks': [MOCK_VIRT_IS_XEN], }, + 'OpenStack-AssetTag-Nova': { + # VMware vSphere can't modify product-name, LP: #1669875 + 'ds': 'OpenStack', + 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Nova\n'}, + 'mocks': [MOCK_VIRT_IS_XEN], + }, + 'OpenStack-AssetTag-Compute': { + # VMware vSphere can't modify product-name, LP: #1669875 + 'ds': 'OpenStack', + 'files': {P_CHASSIS_ASSET_TAG: 'OpenStack Compute\n'}, + 'mocks': [MOCK_VIRT_IS_XEN], + }, 'OVF-seed': { 'ds': 'OVF', 'files': { diff --git a/tools/ds-identify b/tools/ds-identify index 6518901e..e16708f6 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -979,6 +979,14 @@ dscheck_OpenStack() { return ${DS_FOUND} fi + # LP: #1669875 : allow identification of OpenStack by asset tag + if dmi_chassis_asset_tag_matches "$nova"; then + return ${DS_FOUND} + fi + if dmi_chassis_asset_tag_matches "$compute"; then + return ${DS_FOUND} + fi + # LP: #1715241 : arch other than intel are not identified properly. 
case "$DI_UNAME_MACHINE" in i?86|x86_64) :;; -- cgit v1.2.3 From a0f863da274fcd631441ba38fa9c7dd438a56480 Mon Sep 17 00:00:00 2001 From: Gonéri Le Bouder Date: Mon, 17 Jun 2019 17:43:46 +0000 Subject: tools/build-on-freebsd: update to python3 - use python3 by default - ability to use any Python version through the PYTHON env-var - indent with 4 spaces - use 'set -eux' - remove trailing whitespace - drop the cheetah dep, Jinja2 is enough --- tools/build-on-freebsd | 73 +++++++++++++++++++++++++++----------------------- 1 file changed, 40 insertions(+), 33 deletions(-) (limited to 'tools') diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd index dc3b9747..8ae64567 100755 --- a/tools/build-on-freebsd +++ b/tools/build-on-freebsd @@ -3,36 +3,43 @@ # installing cloud-init. This script takes care of building and installing. It # will optionally make a first run at the end. +set -eux + fail() { echo "FAILED:" "$@" 1>&2; exit 1; } +PYTHON="${PYTHON:-python3}" +if [ ! $(which ${PYTHON}) ]; then + echo "Please install python first." + exit 1 +fi +py_prefix=$(${PYTHON} -c 'import sys; print("py%d%d" % (sys.version_info.major, sys.version_info.minor))') + # Check dependencies: depschecked=/tmp/c-i.dependencieschecked pkgs=" - bash - chpasswd - dmidecode - e2fsprogs - py27-Jinja2 - py27-boto - py27-cheetah - py27-configobj - py27-jsonpatch - py27-jsonpointer - py27-jsonschema - py27-oauthlib - py27-requests - py27-serial - py27-six - py27-yaml - python - sudo + bash + chpasswd + dmidecode + e2fsprogs + $py_prefix-Jinja2 + $py_prefix-boto + $py_prefix-configobj + $py_prefix-jsonpatch + $py_prefix-jsonpointer + $py_prefix-jsonschema + $py_prefix-oauthlib + $py_prefix-requests + $py_prefix-serial + $py_prefix-six + $py_prefix-yaml + sudo " -[ -f "$depschecked" ] || pkg install ${pkgs} || fail "install packages" +[ -f "$depschecked" ] || pkg install --yes ${pkgs} || fail "install packages" touch $depschecked # Build the code and install in /usr/local/: -python2.7 setup.py build -python2.7 setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd +${PYTHON} setup.py build +${PYTHON} setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd # Enable cloud-init in /etc/rc.conf: sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf @@ -40,21 +47,21 @@ echo 'cloudinit_enable="YES"' >> /etc/rc.conf echo "Installation completed." -if [ "$1" = "run" ]; then - echo "Ok, now let's see if it works." +if [ "$#" -gt 1 ] && [ "$1" = "run" ]; then + echo "Ok, now let's see if it works." - # Backup SSH keys - mv /etc/ssh/ssh_host_* /tmp/ + # Backup SSH keys + mv /etc/ssh/ssh_host_* /tmp/ - # Remove old metadata - rm -rf /var/lib/cloud + # Remove old metadata + rm -rf /var/lib/cloud - # Just log everything, quick&dirty - rm /usr/local/etc/cloud/cloud.cfg.d/05_logging.cfg + # Just log everything, quick&dirty + rm /usr/local/etc/cloud/cloud.cfg.d/05_logging.cfg - # Start: - /usr/local/etc/rc.d/cloudinit start + # Start: + /usr/local/etc/rc.d/cloudinit start - # Restore SSH keys - mv /tmp/ssh_host_* /etc/ssh/ + # Restore SSH keys + mv /tmp/ssh_host_* /etc/ssh/ fi -- cgit v1.2.3 From d9769c475d38a8c30084b1e7537ae3f0359ed3ad Mon Sep 17 00:00:00 2001 From: Pengpeng Sun Date: Tue, 16 Jul 2019 14:31:17 +0000 Subject: Add a cdrom size checker for OVF ds to ds-identify With a large size ISO file attached to iso dev, ds-identify might grep it entirely if iso dev is ISO9660, it takes very long time to start OS. 
Resolve this by: - Adding a checker to read the ISO size (from sysfs). If the size of the ISO filesystem is > 10MiB then the ISO will be ignored (logged as oversized). - Move the ovf vmware guest customization checker to be ahead of cdrom ovf checker, so no need check the ISO size if vmware guest customization is enabled. LP: #1806701 --- tests/unittests/test_ds_identify.py | 25 ++++++++++++++++++++++++ tools/ds-identify | 39 ++++++++++++++++++++++++------------- 2 files changed, 51 insertions(+), 13 deletions(-) (limited to 'tools') diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 7575223f..587e6993 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -524,6 +524,30 @@ class TestDsIdentify(DsIdentifyBase): self._check_via_dict( ovf_cdrom_by_label, rc=RC_FOUND, dslist=['OVF', DS_NONE]) + def test_ovf_on_vmware_iso_found_by_cdrom_with_different_size(self): + """OVF is identified by well-known iso9660 labels.""" + ovf_cdrom_with_size = copy.deepcopy(VALID_CFG['OVF']) + + # Set cdrom size to 20480 (10MB in 512 byte units) + ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '20480\n' + self._check_via_dict( + ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled") + + # Set cdrom size to 204800 (100MB in 512 byte units) + ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '204800\n' + self._check_via_dict( + ovf_cdrom_with_size, rc=RC_NOT_FOUND, policy_dmi="disabled") + + # Set cdrom size to 18432 (9MB in 512 byte units) + ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '18432\n' + self._check_via_dict( + ovf_cdrom_with_size, rc=RC_FOUND, dslist=['OVF', DS_NONE]) + + # Set cdrom size to 2048 (1MB in 512 byte units) + ovf_cdrom_with_size['files']['sys/class/block/sr0/size'] = '2048\n' + self._check_via_dict( + ovf_cdrom_with_size, rc=RC_FOUND, dslist=['OVF', DS_NONE]) + def test_default_nocloud_as_vdb_iso9660(self): """NoCloud is found with iso9660 filesystem on non-cdrom disk.""" self._test_ds_found('NoCloud') @@ -815,6 +839,7 @@ VALID_CFG = { ], 'files': { 'dev/sr0': 'pretend ovf iso has ' + OVF_MATCH_STRING + '\n', + 'sys/class/block/sr0/size': '2048\n', } }, 'OVF-guestinfo': { diff --git a/tools/ds-identify b/tools/ds-identify index e16708f6..0305e361 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -766,10 +766,34 @@ is_cdrom_ovf() { config-2|CONFIG-2|rd_rdfe_stable*|cidata|CIDATA) return 1;; esac + # skip device which size is 10MB or larger + local size="" sfile="${PATH_SYS_CLASS_BLOCK}/${dev##*/}/size" + [ -f "$sfile" ] || return 1 + read size <"$sfile" || { warn "failed reading from $sfile"; return 1; } + # size is in 512 byte units. 
so convert to MB (integer division)
+    if [ $((size/2048)) -ge 10 ]; then
+        debug 2 "$dev: size $((size/2048))MB is considered too large for OVF"
+        return 1
+    fi
+
     local idstr="http://schemas.dmtf.org/ovf/environment/1"
     grep --quiet --ignore-case "$idstr" "${PATH_ROOT}$dev"
 }
 
+has_ovf_cdrom() {
+    # DI_ISO9660_DEVS is <device>=label,<device>=label2
+    # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces
+    if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then
+        local oifs="$IFS"
+        # shellcheck disable=2086
+        { IFS=","; set -- ${DI_ISO9660_DEVS}; IFS="$oifs"; }
+        for tok in "$@"; do
+            is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return 0
+        done
+    fi
+    return 1
+}
+
 dscheck_OVF() {
     check_seed_dir ovf ovf-env.xml && return "${DS_FOUND}"
 
@@ -780,20 +804,9 @@ dscheck_OVF() {
 
     ovf_vmware_transport_guestinfo && return "${DS_FOUND}"
 
-    # DI_ISO9660_DEVS is <device>=label,<device>=label2
-    # like /dev/sr0=OVF-TRANSPORT,/dev/other=with spaces
-    if [ "${DI_ISO9660_DEVS#${UNAVAILABLE}:}" = "${DI_ISO9660_DEVS}" ]; then
-        local oifs="$IFS"
-        # shellcheck disable=2086
-        { IFS=","; set -- ${DI_ISO9660_DEVS}; IFS="$oifs"; }
-        for tok in "$@"; do
-            is_cdrom_ovf "${tok%%=*}" "${tok#*=}" && return $DS_FOUND
-        done
-    fi
+    has_ovf_cdrom && return "${DS_FOUND}"
 
-    if ovf_vmware_guest_customization; then
-        return ${DS_FOUND}
-    fi
+    ovf_vmware_guest_customization && return "${DS_FOUND}"
 
     return ${DS_NOT_FOUND}
 }
-- 
cgit v1.2.3


From a02c0c9aa24a16f1983a81fe5dbfadac3d7e0ad3 Mon Sep 17 00:00:00 2001
From: Ryan Harper
Date: Thu, 18 Jul 2019 19:53:50 +0000
Subject: cloud_tests: updates and fixes

- Update paramiko and cryptography module versions (2.4.2) to
  address issues with algo and deprecation warnings.
- Modify ssh keypair generation to work with updated paramiko
- tools/xkvm sync with newer version from curtin
- Update NoCloudKvm instance.py to work with updated xkvm
- pass -name to instance, useful for debugging on shared host
- Add cache_mode platform config; default to cache=none,aio=native
- Switch to yaml.safe_load() in platforms.py
---
 integration-requirements.txt                       |  3 +-
 tests/cloud_tests/platforms.yaml                   |  1 +
 tests/cloud_tests/platforms/nocloudkvm/instance.py | 13 +++--
 tests/cloud_tests/platforms/platforms.py           |  2 +-
 tests/cloud_tests/setup_image.py                   |  3 +-
 tools/xkvm                                         | 61 +++++++++++++++++++---
 6 files changed, 68 insertions(+), 15 deletions(-)

(limited to 'tools')

diff --git a/integration-requirements.txt b/integration-requirements.txt
index 880d9886..fe5ad45d 100644
--- a/integration-requirements.txt
+++ b/integration-requirements.txt
@@ -10,7 +10,8 @@ unittest2
 boto3==1.5.9
 
 # ssh communication
-paramiko==2.4.1
+paramiko==2.4.2
+cryptography==2.4.2
 
 
 # lxd backend
diff --git a/tests/cloud_tests/platforms.yaml b/tests/cloud_tests/platforms.yaml
index 448aa98d..652a7051 100644
--- a/tests/cloud_tests/platforms.yaml
+++ b/tests/cloud_tests/platforms.yaml
@@ -66,5 +66,6 @@ platforms:
             {{ config_get("user.vendor-data", properties.default) }}
     nocloud-kvm:
         enabled: true
+        cache_mode: cache=none,aio=native
 
 # vi: ts=4 expandtab
diff --git a/tests/cloud_tests/platforms/nocloudkvm/instance.py b/tests/cloud_tests/platforms/nocloudkvm/instance.py
index 33ff3f24..96185b75 100644
--- a/tests/cloud_tests/platforms/nocloudkvm/instance.py
+++ b/tests/cloud_tests/platforms/nocloudkvm/instance.py
@@ -74,6 +74,8 @@ class NoCloudKVMInstance(Instance):
         self.pid_file = None
         self.console_file = None
         self.disk = image_path
+        self.cache_mode = platform.config.get('cache_mode',
+                                              'cache=none,aio=native')
         self.meta_data = meta_data
 
     def shutdown(self, wait=True):
@@ -113,7 +115,10 @@ class NoCloudKVMInstance(Instance): pass if self.pid_file: - os.remove(self.pid_file) + try: + os.remove(self.pid_file) + except Exception: + pass self.pid = None self._ssh_close() @@ -160,13 +165,13 @@ class NoCloudKVMInstance(Instance): self.ssh_port = self.get_free_port() cmd = ['./tools/xkvm', - '--disk', '%s,cache=unsafe' % self.disk, - '--disk', '%s,cache=unsafe' % seed, + '--disk', '%s,%s' % (self.disk, self.cache_mode), + '--disk', '%s' % seed, '--netdev', ','.join(['user', 'hostfwd=tcp::%s-:22' % self.ssh_port, 'dnssearch=%s' % CI_DOMAIN]), '--', '-pidfile', self.pid_file, '-vnc', 'none', - '-m', '2G', '-smp', '2', '-nographic', + '-m', '2G', '-smp', '2', '-nographic', '-name', self.name, '-serial', 'file:' + self.console_file] subprocess.Popen(cmd, close_fds=True, diff --git a/tests/cloud_tests/platforms/platforms.py b/tests/cloud_tests/platforms/platforms.py index abbfebba..bebdf1c6 100644 --- a/tests/cloud_tests/platforms/platforms.py +++ b/tests/cloud_tests/platforms/platforms.py @@ -48,7 +48,7 @@ class Platform(object): if os.path.exists(filename): c_util.del_file(filename) - c_util.subp(['ssh-keygen', '-t', 'rsa', '-b', '4096', + c_util.subp(['ssh-keygen', '-m', 'PEM', '-t', 'rsa', '-b', '4096', '-f', filename, '-P', '', '-C', 'ubuntu@cloud_test'], capture=True) diff --git a/tests/cloud_tests/setup_image.py b/tests/cloud_tests/setup_image.py index 39f4517f..a8aaba15 100644 --- a/tests/cloud_tests/setup_image.py +++ b/tests/cloud_tests/setup_image.py @@ -222,7 +222,8 @@ def setup_image(args, image): for name, func, desc in handlers if getattr(args, name, None)] try: - data = yaml.load(image.read_data("/etc/cloud/build.info", decode=True)) + data = yaml.safe_load( + image.read_data("/etc/cloud/build.info", decode=True)) info = ' '.join(["%s=%s" % (k, data.get(k)) for k in ("build_name", "serial") if k in data]) except Exception as e: diff --git a/tools/xkvm b/tools/xkvm index a30ba916..8d44cad7 100755 --- a/tools/xkvm +++ b/tools/xkvm @@ -1,4 +1,6 @@ #!/bin/bash +# This file is part of cloud-init. +# See LICENSE file for copyright and license info. set -f @@ -11,6 +13,8 @@ TAPDEVS=( ) # OVS_CLEANUP gets populated with bridge:devname pairs used with ovs OVS_CLEANUP=( ) MAC_PREFIX="52:54:00:12:34" +# allow this to be set externally. +_QEMU_SUPPORTS_FILE_LOCKING="${_QEMU_SUPPORTS_FILE_LOCKING}" KVM="kvm" declare -A KVM_DEVOPTS @@ -119,6 +123,21 @@ isdevopt() { return 1 } +qemu_supports_file_locking() { + # hackily check if qemu has file.locking in -drive params (LP: #1716028) + if [ -z "$_QEMU_SUPPORTS_FILE_LOCKING" ]; then + # The only way we could find to check presense of file.locking is + # qmp (query-qmp-schema). Simply checking if the virtio-blk driver + # supports 'share-rw' is expected to be equivalent and simpler. + isdevopt virtio-blk share-rw && + _QEMU_SUPPORTS_FILE_LOCKING=true || + _QEMU_SUPPORTS_FILE_LOCKING=false + debug 1 "qemu supports file locking = ${_QEMU_SUPPORTS_FILE_LOCKING}" + fi + [ "$_QEMU_SUPPORTS_FILE_LOCKING" = "true" ] + return +} + padmac() { # return a full mac, given a subset. 
# assume whatever is input is the last portion to be @@ -367,7 +386,7 @@ main() { [ ${#netdevs[@]} -eq 0 ] && netdevs=( "${DEF_BRIDGE}" ) pt=( "$@" ) - local kvm_pkg="" virtio_scsi_bus="virtio-scsi-pci" + local kvm_pkg="" virtio_scsi_bus="virtio-scsi-pci" virtio_rng_device="virtio-rng-pci" [ -n "$kvm" ] && kvm_pkg="none" case $(uname -m) in i?86) @@ -382,7 +401,10 @@ main() { [ -n "$kvm" ] || { kvm="qemu-system-s390x"; kvm_pkg="qemu-system-misc"; } def_netmodel=${DEF_NETMODEL:-"virtio-net-ccw"} + # disable virtio-scsi-bus virtio_scsi_bus="virtio-scsi-ccw" + virtio_blk_bus="virtio-blk-ccw" + virtio_rng_device="virtio-rng-ccw" ;; ppc64*) [ -n "$kvm" ] || @@ -408,7 +430,7 @@ main() { bios_opts=( "${_RET[@]}" ) local out="" fmt="" bus="" unit="" index="" serial="" driver="" devopts="" - local busorindex="" driveopts="" cur="" val="" file="" + local busorindex="" driveopts="" cur="" val="" file="" wwn="" for((i=0;i<${#diskdevs[@]};i++)); do cur=${diskdevs[$i]} IFS=","; set -- $cur; IFS="$oifs" @@ -420,6 +442,7 @@ main() { unit="" index="" serial="" + wwn="" for tok in "$@"; do [ "${tok#*=}" = "${tok}" -a -f "${tok}" -a -z "$file" ] && file="$tok" val=${tok#*=} @@ -433,6 +456,7 @@ main() { file=*) file=$val;; fmt=*|format=*) fmt=$val;; serial=*) serial=$val;; + wwn=*) wwn=$val;; bus=*) bus=$val;; unit=*) unit=$val;; index=*) index=$val;; @@ -443,14 +467,19 @@ main() { out=$(LANG=C qemu-img info "$file") && fmt=$(echo "$out" | awk '$0 ~ /^file format:/ { print $3 }') || { error "failed to determine format of $file"; return 1; } - else + elif [ -z "$fmt" ]; then fmt=raw fi if [ -z "$driver" ]; then driver="$def_disk_driver" fi if [ -z "$serial" ]; then - serial="${file##*/}" + # use filename as serial if not provided a wwn + if [ -n "$wwn" ]; then + serial="$wwn" + else + serial="${file##*/}" + fi fi # make sure we add either bus= or index= @@ -470,11 +499,21 @@ main() { id=*|if=*|driver=*|$file|file=*) continue;; fmt=*|format=*) continue;; serial=*|bus=*|unit=*|index=*) continue;; + file.locking=*) + qemu_supports_file_locking || { + debug 2 "qemu has no file locking." 
\ + "Dropping '$tok' from: $cur" + continue + };; esac isdevopt "$driver" "$tok" && devopts="${devopts},$tok" || diskopts="${diskopts},${tok}" done - + case $driver in + virtio-blk-ccw) + # disable scsi when using virtio-blk-ccw + devopts="${devopts},scsi=off";; + esac diskargs=( "${diskargs[@]}" -drive "$diskopts" -device "$devopts" ) done @@ -623,10 +662,16 @@ main() { done local bus_devices - bus_devices=( -device "$virtio_scsi_bus,id=virtio-scsi-xkvm" ) - cmd=( "${kvmcmd[@]}" "${archopts[@]}" + if [ -n "${virtio_scsi_bus}" ]; then + bus_devices=( -device "$virtio_scsi_bus,id=virtio-scsi-xkvm" ) + fi + local rng_devices + rng_devices=( -object "rng-random,filename=/dev/urandom,id=objrng0" + -device "$virtio_rng_device,rng=objrng0,id=rng0" ) + cmd=( "${kvmcmd[@]}" "${archopts[@]}" "${bios_opts[@]}" "${bus_devices[@]}" + "${rng_devices[@]}" "${netargs[@]}" "${diskargs[@]}" "${pt[@]}" ) local pcmd=$(quote_cmd "${cmd[@]}") @@ -661,4 +706,4 @@ else main "$@" fi -# vi: ts=4 expandtab +# vi: ts=4 expandtab syntax=sh -- cgit v1.2.3 From 4dfed67d0e82970f8717d0b524c593962698ca4f Mon Sep 17 00:00:00 2001 From: Chris Glass Date: Thu, 8 Aug 2019 17:09:57 +0000 Subject: New data source for the Exoscale.com cloud platform - dsidentify switches to the new Exoscale datasource on matching DMI name - New Exoscale datasource added Signed-off-by: Mathieu Corbin --- cloudinit/apport.py | 1 + cloudinit/settings.py | 1 + cloudinit/sources/DataSourceExoscale.py | 258 +++++++++++++++++++++++ doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/exoscale.rst | 68 ++++++ tests/unittests/test_datasource/test_common.py | 2 + tests/unittests/test_datasource/test_exoscale.py | 203 ++++++++++++++++++ tools/ds-identify | 7 +- 8 files changed, 540 insertions(+), 1 deletion(-) create mode 100644 cloudinit/sources/DataSourceExoscale.py create mode 100644 doc/rtd/topics/datasources/exoscale.rst create mode 100644 tests/unittests/test_datasource/test_exoscale.py (limited to 'tools') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 22cb7fde..003ff1ff 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -23,6 +23,7 @@ KNOWN_CLOUD_NAMES = [ 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', + 'Exoscale', 'Hetzner Cloud', 'IBM - (aka SoftLayer or BlueMix)', 'LXD', diff --git a/cloudinit/settings.py b/cloudinit/settings.py index b1ebaade..2060d81f 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -39,6 +39,7 @@ CFG_BUILTIN = { 'Hetzner', 'IBMCloud', 'Oracle', + 'Exoscale', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceExoscale.py b/cloudinit/sources/DataSourceExoscale.py new file mode 100644 index 00000000..52e7f6f6 --- /dev/null +++ b/cloudinit/sources/DataSourceExoscale.py @@ -0,0 +1,258 @@ +# Author: Mathieu Corbin +# Author: Christopher Glass +# +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit import ec2_utils as ec2 +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import url_helper +from cloudinit import util + +LOG = logging.getLogger(__name__) + +METADATA_URL = "http://169.254.169.254" +API_VERSION = "1.0" +PASSWORD_SERVER_PORT = 8080 + +URL_TIMEOUT = 10 +URL_RETRIES = 6 + +EXOSCALE_DMI_NAME = "Exoscale" + +BUILTIN_DS_CONFIG = { + # We run the set password config module on every boot in order to enable + # resetting the instance's password via the exoscale console (and a + # subsequent instance reboot). 
+ 'cloud_config_modules': [["set-passwords", "always"]] +} + + +class DataSourceExoscale(sources.DataSource): + + dsname = 'Exoscale' + + def __init__(self, sys_cfg, distro, paths): + super(DataSourceExoscale, self).__init__(sys_cfg, distro, paths) + LOG.debug("Initializing the Exoscale datasource") + + self.metadata_url = self.ds_cfg.get('metadata_url', METADATA_URL) + self.api_version = self.ds_cfg.get('api_version', API_VERSION) + self.password_server_port = int( + self.ds_cfg.get('password_server_port', PASSWORD_SERVER_PORT)) + self.url_timeout = self.ds_cfg.get('timeout', URL_TIMEOUT) + self.url_retries = self.ds_cfg.get('retries', URL_RETRIES) + + self.extra_config = BUILTIN_DS_CONFIG + + def wait_for_metadata_service(self): + """Wait for the metadata service to be reachable.""" + + metadata_url = "{}/{}/meta-data/instance-id".format( + self.metadata_url, self.api_version) + + url = url_helper.wait_for_url( + urls=[metadata_url], + max_wait=self.url_max_wait, + timeout=self.url_timeout, + status_cb=LOG.critical) + + return bool(url) + + def crawl_metadata(self): + """ + Crawl the metadata service when available. + + @returns: Dictionary of crawled metadata content. + """ + metadata_ready = util.log_time( + logfunc=LOG.info, + msg='waiting for the metadata service', + func=self.wait_for_metadata_service) + + if not metadata_ready: + return {} + + return read_metadata(self.metadata_url, self.api_version, + self.password_server_port, self.url_timeout, + self.url_retries) + + def _get_data(self): + """Fetch the user data, the metadata and the VM password + from the metadata service. + + Please refer to the datasource documentation for details on how the + metadata server and password server are crawled. + """ + if not self._is_platform_viable(): + return False + + data = util.log_time( + logfunc=LOG.debug, + msg='Crawl of metadata service', + func=self.crawl_metadata) + + if not data: + return False + + self.userdata_raw = data['user-data'] + self.metadata = data['meta-data'] + password = data.get('password') + + password_config = {} + if password: + # Since we have a password, let's make sure we are allowed to use + # it by allowing ssh_pwauth. + # The password module's default behavior is to leave the + # configuration as-is in this regard, so that means it will either + # leave the password always disabled if no password is ever set, or + # leave the password login enabled if we set it once. + password_config = { + 'ssh_pwauth': True, + 'password': password, + 'chpasswd': { + 'expire': False, + }, + } + + # builtin extra_config overrides password_config + self.extra_config = util.mergemanydict( + [self.extra_config, password_config]) + + return True + + def get_config_obj(self): + return self.extra_config + + def _is_platform_viable(self): + return util.read_dmi_data('system-product-name').startswith( + EXOSCALE_DMI_NAME) + + +# Used to match classes to dependencies +datasources = [ + (DataSourceExoscale, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +def get_password(metadata_url=METADATA_URL, + api_version=API_VERSION, + password_server_port=PASSWORD_SERVER_PORT, + url_timeout=URL_TIMEOUT, + url_retries=URL_RETRIES): + """Obtain the VM's password if set. + + Once fetched the password is marked saved. 
Future calls to this method may + return empty string or 'saved_password'.""" + password_url = "{}:{}/{}/".format(metadata_url, password_server_port, + api_version) + response = url_helper.read_file_or_url( + password_url, + ssl_details=None, + headers={"DomU_Request": "send_my_password"}, + timeout=url_timeout, + retries=url_retries) + password = response.contents.decode('utf-8') + # the password is empty or already saved + # Note: the original metadata server would answer an additional + # 'bad_request' status, but the Exoscale implementation does not. + if password in ['', 'saved_password']: + return None + # save the password + url_helper.read_file_or_url( + password_url, + ssl_details=None, + headers={"DomU_Request": "saved_password"}, + timeout=url_timeout, + retries=url_retries) + return password + + +def read_metadata(metadata_url=METADATA_URL, + api_version=API_VERSION, + password_server_port=PASSWORD_SERVER_PORT, + url_timeout=URL_TIMEOUT, + url_retries=URL_RETRIES): + """Query the metadata server and return the retrieved data.""" + crawled_metadata = {} + crawled_metadata['_metadata_api_version'] = api_version + try: + crawled_metadata['user-data'] = ec2.get_instance_userdata( + api_version, + metadata_url, + timeout=url_timeout, + retries=url_retries) + crawled_metadata['meta-data'] = ec2.get_instance_metadata( + api_version, + metadata_url, + timeout=url_timeout, + retries=url_retries) + except Exception as e: + util.logexc(LOG, "failed reading from metadata url %s (%s)", + metadata_url, e) + return {} + + try: + crawled_metadata['password'] = get_password( + api_version=api_version, + metadata_url=metadata_url, + password_server_port=password_server_port, + url_retries=url_retries, + url_timeout=url_timeout) + except Exception as e: + util.logexc(LOG, "failed to read from password server url %s:%s (%s)", + metadata_url, password_server_port, e) + + return crawled_metadata + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='Query Exoscale Metadata') + parser.add_argument( + "--endpoint", + metavar="URL", + help="The url of the metadata service.", + default=METADATA_URL) + parser.add_argument( + "--version", + metavar="VERSION", + help="The version of the metadata endpoint to query.", + default=API_VERSION) + parser.add_argument( + "--retries", + metavar="NUM", + type=int, + help="The number of retries querying the endpoint.", + default=URL_RETRIES) + parser.add_argument( + "--timeout", + metavar="NUM", + type=int, + help="The time in seconds to wait before timing out.", + default=URL_TIMEOUT) + parser.add_argument( + "--password-port", + metavar="PORT", + type=int, + help="The port on which the password endpoint listens", + default=PASSWORD_SERVER_PORT) + + args = parser.parse_args() + + data = read_metadata( + metadata_url=args.endpoint, + api_version=args.version, + password_server_port=args.password_port, + url_timeout=args.timeout, + url_retries=args.retries) + + print(util.json_dumps(data)) + +# vi: ts=4 expandtab diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 648c6068..2148cd5e 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -155,6 +155,7 @@ Follow for more information. 
datasources/configdrive.rst
    datasources/digitalocean.rst
    datasources/ec2.rst
+   datasources/exoscale.rst
    datasources/maas.rst
    datasources/nocloud.rst
    datasources/opennebula.rst
diff --git a/doc/rtd/topics/datasources/exoscale.rst b/doc/rtd/topics/datasources/exoscale.rst
new file mode 100644
index 00000000..27aec9cd
--- /dev/null
+++ b/doc/rtd/topics/datasources/exoscale.rst
@@ -0,0 +1,68 @@
+.. _datasource_exoscale:
+
+Exoscale
+========
+
+This datasource supports reading from the metadata server used on the
+`Exoscale platform <https://exoscale.com>`_.
+
+Use of the Exoscale datasource is recommended to benefit from new features of
+the Exoscale platform.
+
+The datasource relies on the availability of a compatible metadata server
+(``http://169.254.169.254`` is used by default) and its companion password
+server, reachable at the same address (by default on port 8080).
+
+Crawling of metadata
+--------------------
+
+The metadata service and password server are crawled slightly differently:
+
+ * The "metadata service" is crawled every boot.
+ * The password server is also crawled every boot (the Exoscale datasource
+   forces the password module to run with "frequency always").
+
+In the password server case, the following rules apply in order to enable the
+"restore instance password" functionality:
+
+ * If a password is returned by the password server, it is then marked "saved"
+   by the cloud-init datasource. Subsequent boots will skip setting the password
+   (the password server will return "saved_password").
+ * When the instance password is reset (via the Exoscale UI), the password
+   server will return the non-empty password at next boot, therefore causing
+   cloud-init to reset the instance's password.
+
+Configuration
+-------------
+
+Users of this datasource are discouraged from changing the default settings
+unless instructed to by Exoscale support.
+
+The following settings are available and can be set for the datasource in system
+configuration (in `/etc/cloud/cloud.cfg.d/`).
+
+The settings available are:
+
+ * **metadata_url**: The URL for the metadata service (defaults to
+   ``http://169.254.169.254``)
+ * **api_version**: The API version path on which to query the instance metadata
+   (defaults to ``1.0``)
+ * **password_server_port**: The port (on the metadata server) on which the
+   password server listens (defaults to ``8080``).
+ * **timeout**: the timeout value provided to urlopen for each individual http
+   request. (defaults to ``10``)
+ * **retries**: The number of retries that should be done for an http request
+   (defaults to ``6``)
+
+
+An example configuration with the default values is provided below:
+
+.. 
sourcecode:: yaml + + datasource: + Exoscale: + metadata_url: "http://169.254.169.254" + api_version: "1.0" + password_server_port: 8080 + timeout: 10 + retries: 6 diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 2a9cfb29..61a7a762 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -13,6 +13,7 @@ from cloudinit.sources import ( DataSourceConfigDrive as ConfigDrive, DataSourceDigitalOcean as DigitalOcean, DataSourceEc2 as Ec2, + DataSourceExoscale as Exoscale, DataSourceGCE as GCE, DataSourceHetzner as Hetzner, DataSourceIBMCloud as IBMCloud, @@ -53,6 +54,7 @@ DEFAULT_NETWORK = [ CloudStack.DataSourceCloudStack, DSNone.DataSourceNone, Ec2.DataSourceEc2, + Exoscale.DataSourceExoscale, GCE.DataSourceGCE, MAAS.DataSourceMAAS, NoCloud.DataSourceNoCloudNet, diff --git a/tests/unittests/test_datasource/test_exoscale.py b/tests/unittests/test_datasource/test_exoscale.py new file mode 100644 index 00000000..350c3304 --- /dev/null +++ b/tests/unittests/test_datasource/test_exoscale.py @@ -0,0 +1,203 @@ +# Author: Mathieu Corbin +# Author: Christopher Glass +# +# This file is part of cloud-init. See LICENSE file for license information. +from cloudinit import helpers +from cloudinit.sources.DataSourceExoscale import ( + API_VERSION, + DataSourceExoscale, + METADATA_URL, + get_password, + PASSWORD_SERVER_PORT, + read_metadata) +from cloudinit.tests.helpers import HttprettyTestCase, mock + +import httpretty +import requests + + +TEST_PASSWORD_URL = "{}:{}/{}/".format(METADATA_URL, + PASSWORD_SERVER_PORT, + API_VERSION) + +TEST_METADATA_URL = "{}/{}/meta-data/".format(METADATA_URL, + API_VERSION) + +TEST_USERDATA_URL = "{}/{}/user-data".format(METADATA_URL, + API_VERSION) + + +@httpretty.activate +class TestDatasourceExoscale(HttprettyTestCase): + + def setUp(self): + super(TestDatasourceExoscale, self).setUp() + self.tmp = self.tmp_dir() + self.password_url = TEST_PASSWORD_URL + self.metadata_url = TEST_METADATA_URL + self.userdata_url = TEST_USERDATA_URL + + def test_password_saved(self): + """The password is not set when it is not found + in the metadata service.""" + httpretty.register_uri(httpretty.GET, + self.password_url, + body="saved_password") + self.assertFalse(get_password()) + + def test_password_empty(self): + """No password is set if the metadata service returns + an empty string.""" + httpretty.register_uri(httpretty.GET, + self.password_url, + body="") + self.assertFalse(get_password()) + + def test_password(self): + """The password is set to what is found in the metadata + service.""" + expected_password = "p@ssw0rd" + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_password) + password = get_password() + self.assertEqual(expected_password, password) + + def test_get_data(self): + """The datasource conforms to expected behavior when supplied + full test data.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_password = "p@ssw0rd" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_password) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + 
"{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'ssh_pwauth': True, + 'password': expected_password, + 'cloud_config_modules': [ + ["set-passwords", "always"]], + 'chpasswd': { + 'expire': False, + }}) + + def test_get_data_saved_password(self): + """The datasource conforms to expected behavior when saved_password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_answer = "saved_password" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'cloud_config_modules': [ + ["set-passwords", "always"]]}) + + def test_get_data_no_password(self): + """The datasource conforms to expected behavior when no password is + returned by the password server.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: True + expected_answer = "" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.password_url, + body=expected_answer) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + self.assertTrue(ds._get_data()) + self.assertEqual(ds.userdata_raw.decode("utf-8"), "#cloud-config") + self.assertEqual(ds.metadata, {"instance-id": expected_id, + "local-hostname": expected_hostname}) + self.assertEqual(ds.get_config_obj(), + {'cloud_config_modules': [ + ["set-passwords", "always"]]}) + + @mock.patch('cloudinit.sources.DataSourceExoscale.get_password') + def test_read_metadata_when_password_server_unreachable(self, m_password): + """The read_metadata function returns partial results in case the + password server (only) is unreachable.""" + expected_id = "12345" + expected_hostname = "myname" + expected_userdata = "#cloud-config" + + m_password.side_effect = requests.Timeout('Fake Connection Timeout') + httpretty.register_uri(httpretty.GET, + self.userdata_url, + body=expected_userdata) + httpretty.register_uri(httpretty.GET, + self.metadata_url, + 
body="instance-id\nlocal-hostname") + httpretty.register_uri(httpretty.GET, + "{}local-hostname".format(self.metadata_url), + body=expected_hostname) + httpretty.register_uri(httpretty.GET, + "{}instance-id".format(self.metadata_url), + body=expected_id) + + result = read_metadata() + + self.assertIsNone(result.get("password")) + self.assertEqual(result.get("user-data").decode("utf-8"), + expected_userdata) + + def test_non_viable_platform(self): + """The datasource fails fast when the platform is not viable.""" + path = helpers.Paths({'run_dir': self.tmp}) + ds = DataSourceExoscale({}, None, path) + ds._is_platform_viable = lambda: False + self.assertFalse(ds._get_data()) diff --git a/tools/ds-identify b/tools/ds-identify index 0305e361..e0d4865c 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -124,7 +124,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud Oracle" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -553,6 +553,11 @@ dscheck_CloudStack() { return $DS_NOT_FOUND } +dscheck_Exoscale() { + dmi_product_name_matches "Exoscale*" && return $DS_FOUND + return $DS_NOT_FOUND +} + dscheck_CloudSigma() { # http://paste.ubuntu.com/23624795/ dmi_product_name_matches "CloudSigma" && return $DS_FOUND -- cgit v1.2.3 From e7f16e215ef4fd6a31ddd2f1b585bbb6508bff06 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Wed, 11 Sep 2019 21:09:34 +0000 Subject: Brightbox: restrict detection to require full domain match .brightbox.com The detection for brightbox in both ds-identify and in identify_brightbox would incorrectly match the domain 'bobrightbox', which is not a brightbox platform. The fix here is to restrict matching to '*.brightbox.com' rather than '*brightbox.com' Also, while here remove a url to bug 1661693 which added the knowledge of brightbox. 
---
 cloudinit/sources/DataSourceEc2.py  |  2 +-
 tests/unittests/test_ds_identify.py | 16 ++++++++++++++--
 tools/ds-identify                   |  3 +--
 3 files changed, 16 insertions(+), 5 deletions(-)

(limited to 'tools')

diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 5c017bfb..10107456 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -473,7 +473,7 @@ def identify_aws(data):
 
 
 def identify_brightbox(data):
-    if data['serial'].endswith('brightbox.com'):
+    if data['serial'].endswith('.brightbox.com'):
         return CloudNames.BRIGHTBOX
 
 
diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 587e6993..de87be29 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -195,6 +195,10 @@ class DsIdentifyBase(CiTestCase):
         return self._check_via_dict(
             data, RC_FOUND, dslist=[data.get('ds'), DS_NONE])
 
+    def _test_ds_not_found(self, name):
+        data = copy.deepcopy(VALID_CFG[name])
+        return self._check_via_dict(data, RC_NOT_FOUND)
+
     def _check_via_dict(self, data, rc, dslist=None, **kwargs):
         ret = self._call_via_dict(data, **kwargs)
         good = False
@@ -244,9 +248,13 @@ class TestDsIdentify(DsIdentifyBase):
         self._test_ds_found('Ec2-xen')
 
     def test_brightbox_is_ec2(self):
-        """EC2: product_serial ends with 'brightbox.com'"""
+        """EC2: product_serial ends with '.brightbox.com'"""
         self._test_ds_found('Ec2-brightbox')
 
+    def test_bobrightbox_is_not_brightbox(self):
+        """EC2: bobrightbox.com in product_serial is not brightbox'"""
+        self._test_ds_not_found('Ec2-brightbox-negative')
+
     def test_gce_by_product_name(self):
         """GCE identifies itself with product_name."""
         self._test_ds_found('GCE')
@@ -724,7 +732,11 @@ VALID_CFG = {
     },
     'Ec2-brightbox': {
         'ds': 'Ec2',
-        'files': {P_PRODUCT_SERIAL: 'facc6e2f.brightbox.com\n'},
+        'files': {P_PRODUCT_SERIAL: 'srv-otuxg.gb1.brightbox.com\n'},
+    },
+    'Ec2-brightbox-negative': {
+        'ds': 'Ec2',
+        'files': {P_PRODUCT_SERIAL: 'tricky-host.bobrightbox.com\n'},
     },
     'GCE': {
         'ds': 'GCE',
diff --git a/tools/ds-identify b/tools/ds-identify
index e0d4865c..2447d14f 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -891,9 +891,8 @@ ec2_identify_platform() {
     local default="$1"
     local serial="${DI_DMI_PRODUCT_SERIAL}"
 
-    # brightbox https://bugs.launchpad.net/cloud-init/+bug/1661693
     case "$serial" in
-        *brightbox.com) _RET="Brightbox"; return 0;;
+        *.brightbox.com) _RET="Brightbox"; return 0;;
     esac
 
     # AWS http://docs.aws.amazon.com/AWSEC2/
-- 
cgit v1.2.3


From 571f7c36e89f67f4c2d1cacfd8f9269bf864d560 Mon Sep 17 00:00:00 2001
From: Shixin Ruan
Date: Wed, 18 Sep 2019 13:15:25 +0000
Subject: Add datasource for ZStack platform.

The ZStack platform provides an AWS Ec2-compatible metadata service, and
identifies itself to the guest by setting the 'chassis asset tag' to a
string that ends with '.zstack.io'.
--- cloudinit/apport.py | 1 + cloudinit/sources/DataSourceEc2.py | 16 ++++++++++++- doc/rtd/topics/datasources.rst | 1 + doc/rtd/topics/datasources/zstack.rst | 36 +++++++++++++++++++++++++++++ tests/unittests/test_datasource/test_ec2.py | 28 ++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 9 +++++++- tools/ds-identify | 5 ++++ 7 files changed, 94 insertions(+), 2 deletions(-) create mode 100644 doc/rtd/topics/datasources/zstack.rst (limited to 'tools') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 003ff1ff..fde1f75b 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -37,6 +37,7 @@ KNOWN_CLOUD_NAMES = [ 'Scaleway', 'SmartOS', 'VMware', + 'ZStack', 'Other'] # Potentially clear text collected logs diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 10107456..6c72ace2 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -33,6 +33,7 @@ class CloudNames(object): ALIYUN = "aliyun" AWS = "aws" BRIGHTBOX = "brightbox" + ZSTACK = "zstack" # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', # then an attempt at the Ec2 Metadata service will be made. UNKNOWN = "unknown" @@ -477,10 +478,16 @@ def identify_brightbox(data): return CloudNames.BRIGHTBOX +def identify_zstack(data): + if data['asset_tag'].endswith('.zstack.io'): + return CloudNames.ZSTACK + + def identify_platform(): # identify the platform and return an entry in CloudNames. data = _collect_platform_data() - checks = (identify_aws, identify_brightbox, lambda x: CloudNames.UNKNOWN) + checks = (identify_aws, identify_brightbox, identify_zstack, + lambda x: CloudNames.UNKNOWN) for checker in checks: try: result = checker(data) @@ -498,6 +505,7 @@ def _collect_platform_data(): uuid: system-uuid from dmi or /sys/hypervisor uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' serial: dmi 'system-serial-number' (/sys/.../product_serial) + asset_tag: 'dmidecode -s chassis-asset-tag' On Ec2 instances experimentation is that product_serial is upper case, and product_uuid is lower case. This returns lower case values for both. @@ -520,6 +528,12 @@ def _collect_platform_data(): data['serial'] = serial.lower() + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag is None: + asset_tag = '' + + data['asset_tag'] = asset_tag.lower() + return data diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 8e58be97..a337c08c 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -45,6 +45,7 @@ The following is a list of documents for each supported datasource: datasources/oracle.rst datasources/ovf.rst datasources/smartos.rst + datasources/zstack.rst Creation diff --git a/doc/rtd/topics/datasources/zstack.rst b/doc/rtd/topics/datasources/zstack.rst new file mode 100644 index 00000000..36e60ffb --- /dev/null +++ b/doc/rtd/topics/datasources/zstack.rst @@ -0,0 +1,36 @@ +.. _datasource_zstack: + +ZStack +====== +The ZStack platform provides an AWS EC2-compatible metadata service, but with a different datasource identity. +More information about ZStack can be found at `ZStack `__. + +Discovery +--------- +To determine whether a VM is running on the ZStack platform, cloud-init checks the DMI information +via 'dmidecode -s chassis-asset-tag'; if the output ends with '.zstack.io', the instance is running +on the ZStack platform. + + +Metadata +^^^^^^^^ +As with EC2, instance metadata can be queried at + +:: + + GET http://169.254.169.254/2009-04-04/meta-data/ + instance-id + local-hostname + +Userdata +^^^^^^^^ +As with EC2, instance userdata can be queried at + +:: + + GET http://169.254.169.254/2009-04-04/user-data/ + meta_data.json + user_data + password + +.. vi: textwidth=78
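Since these are plain HTTP GETs against the link-local metadata address, the documented endpoints can be exercised with a minimal probe. A hypothetical sketch, runnable only from inside such a guest (the URL and key names are exactly those listed in the document above):

    # Fetch the documented meta-data keys from inside a ZStack guest.
    import urllib.request

    BASE = 'http://169.254.169.254/2009-04-04/meta-data/'
    for key in ('instance-id', 'local-hostname'):
        with urllib.request.urlopen(BASE + key, timeout=5) as resp:
            print(key, resp.read().decode())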
diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 1ec8e009..6fabf258 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -662,4 +662,32 @@ class TestConvertEc2MetadataNetworkConfig(test_helpers.CiTestCase): expected, ec2.convert_ec2_metadata_network_config(self.network_metadata)) + +class TesIdentifyPlatform(test_helpers.CiTestCase): + + def collmock(self, **kwargs): + """return non-special _collect_platform_data updated with changes.""" + unspecial = { + 'asset_tag': '3857-0037-2746-7462-1818-3997-77', + 'serial': 'H23-C4J3JV-R6', + 'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2', + 'uuid_source': 'dmi', + } + unspecial.update(**kwargs) + return unspecial + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_zstack(self, m_collect): + """zstack should be identified if cassis-asset-tag ends in .zstack.io + """ + m_collect.return_value = self.collmock(asset_tag='123456.zstack.io') + self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_zstack_full_domain_only(self, m_collect): + """zstack asset-tag matching should match only on full domain boundary.
+ """ + m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io') + self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index de87be29..7aeeb91c 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -609,6 +609,10 @@ class TestDsIdentify(DsIdentifyBase): self.assertEqual(expected, [p for p in expected if p in toks], "path did not have expected tokens") + def test_zstack_is_ec2(self): + """EC2: chassis asset tag ends with 'zstack.io'""" + self._test_ds_found('Ec2-ZStack') + class TestIsIBMProvisioning(DsIdentifyBase): """Test the is_ibm_provisioning method in ds-identify.""" @@ -971,8 +975,11 @@ VALID_CFG = { {'name': 'blkid', 'ret': 2, 'out': ''}, ], 'files': {ds_smartos.METADATA_SOCKFILE: 'would be a socket\n'}, + }, + 'Ec2-ZStack': { + 'ds': 'Ec2', + 'files': {P_CHASSIS_ASSET_TAG: '123456.zstack.io\n'}, } - } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index 2447d14f..f76f2a6e 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -895,6 +895,11 @@ ec2_identify_platform() { *.brightbox.com) _RET="Brightbox"; return 0;; esac + local asset_tag="${DI_DMI_CHASSIS_ASSET_TAG}" + case "$asset_tag" in + *.zstack.io) _RET="ZStack"; return 0;; + esac + # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="${PATH_SYS_HYPERVISOR}/uuid" -- cgit v1.2.3 From 5d5a32e039782ce3e1c0843082fe26260fa9273a Mon Sep 17 00:00:00 2001 From: Conrad Hoffmann Date: Tue, 1 Oct 2019 20:43:29 +0000 Subject: Add support for Arch Linux in render-cloudcfg MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit  - Detect Arch Linux and set variant accordingly in `system_info()`  - Allow setting render-cloudcfg variant parameter to 'arch'  - Adjust some basic settings for Arch Linux in the cloud.cfg.tmpl The template might need some additional Arch-specific tweaks in the future, but at least for now the generated config works and contains the most relevant modules. Also: - Sort distro variant lists when adding Arch - Add debian to known variants in render-cloudcfg --- cloudinit/util.py | 3 ++- config/cloud.cfg.tmpl | 6 ++++-- tools/render-cloudcfg | 3 ++- 3 files changed, 8 insertions(+), 4 deletions(-) (limited to 'tools') diff --git a/cloudinit/util.py b/cloudinit/util.py index 6e8e73b0..0d338ca7 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -656,7 +656,8 @@ def system_info(): var = 'unknown' if system == "linux": linux_dist = info['dist'][0].lower() - if linux_dist in ('centos', 'debian', 'fedora', 'rhel', 'suse'): + if linux_dist in ( + 'arch', 'centos', 'debian', 'fedora', 'rhel', 'suse'): var = linux_dist elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): var = 'ubuntu' diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 684c7473..87c37ba0 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -137,7 +137,7 @@ cloud_final_modules: # (not accessible to handlers/transforms) system_info: # This will affect which distro class gets used -{% if variant in ["centos", "debian", "fedora", "rhel", "suse", "ubuntu", "freebsd"] %} +{% if variant in ["arch", "centos", "debian", "fedora", "freebsd", "rhel", "suse", "ubuntu"] %} distro: {{ variant }} {% else %} # Unknown/fallback distro. 
@@ -185,7 +185,7 @@ system_info: primary: http://ports.ubuntu.com/ubuntu-ports security: http://ports.ubuntu.com/ubuntu-ports ssh_svcname: ssh -{% elif variant in ["centos", "rhel", "fedora", "suse"] %} +{% elif variant in ["arch", "centos", "fedora", "rhel", "suse"] %} # Default user name + that default users groups (if added/used) default_user: name: {{ variant }} @@ -193,6 +193,8 @@ system_info: gecos: {{ variant }} Cloud User {% if variant == "suse" %} groups: [cdrom, users] +{% elif variant == "arch" %} + groups: [wheel, users] {% else %} groups: [wheel, adm, systemd-journal] {% endif %} diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg index 0957c324..a441f4ff 100755 --- a/tools/render-cloudcfg +++ b/tools/render-cloudcfg @@ -4,7 +4,8 @@ import argparse import os import sys -VARIANTS = ["freebsd", "centos", "fedora", "rhel", "suse", "ubuntu", "unknown"] +VARIANTS = ["arch", "centos", "debian", "fedora", "freebsd", "rhel", "suse", + "ubuntu", "unknown"] if "avoid-pep8-E402-import-not-top-of-file": _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) -- cgit v1.2.3 From d3b1c4ae6bd237a04ba5df4306ff38f752f72132 Mon Sep 17 00:00:00 2001 From: Adam Dobrawy Date: Fri, 4 Oct 2019 23:15:10 +0000 Subject: Add RbxCloud datasource --- cloudinit/sources/DataSourceRbxCloud.py | 250 ++++++++++++++++++++++++++++ doc/rtd/topics/datasources/rbxcloud.rst | 25 +++ tests/unittests/test_datasource/test_rbx.py | 208 +++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 17 +- tools/ds-identify | 7 +- 5 files changed, 505 insertions(+), 2 deletions(-) create mode 100644 cloudinit/sources/DataSourceRbxCloud.py create mode 100644 doc/rtd/topics/datasources/rbxcloud.rst create mode 100644 tests/unittests/test_datasource/test_rbx.py (limited to 'tools') diff --git a/cloudinit/sources/DataSourceRbxCloud.py b/cloudinit/sources/DataSourceRbxCloud.py new file mode 100644 index 00000000..9a8c3d5c --- /dev/null +++ b/cloudinit/sources/DataSourceRbxCloud.py @@ -0,0 +1,250 @@ +# Copyright (C) 2018 Warsaw Data Center +# +# Author: Malwina Leis +# Author: Grzegorz Brzeski +# Author: Adam Dobrawy +# +# This file is part of cloud-init. See LICENSE file for license information. +""" +This file contains code used to gather the user data passed to an +instance on rootbox / hyperone cloud platforms +""" +import errno +import os +import os.path + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import util +from cloudinit.event import EventType + +LOG = logging.getLogger(__name__) +ETC_HOSTS = '/etc/hosts' + + +def get_manage_etc_hosts(): + hosts = util.load_file(ETC_HOSTS, quiet=True) + if hosts: + LOG.debug('/etc/hosts exists - setting manage_etc_hosts to False') + return False + LOG.debug('/etc/hosts does not exists - setting manage_etc_hosts to True') + return True + + +def ip2int(addr): + parts = addr.split('.') + return (int(parts[0]) << 24) + (int(parts[1]) << 16) + \ + (int(parts[2]) << 8) + int(parts[3]) + + +def int2ip(addr): + return '.'.join([str(addr >> (i << 3) & 0xFF) for i in range(4)[::-1]]) + + +def _sub_arp(cmd): + """ + Uses the prefered cloud-init subprocess def of util.subp + and runs arping. 
Breaking this to a separate function + for later use in mocking and unittests + """ + return util.subp(['arping'] + cmd) + + +def gratuitous_arp(items, distro): + source_param = '-S' + if distro.name in ['fedora', 'centos', 'rhel']: + source_param = '-s' + for item in items: + _sub_arp([ + '-c', '2', + source_param, item['source'], + item['destination'] + ]) + + +def get_md(): + rbx_data = None + devices = [ + dev + for dev, bdata in util.blkid().items() + if bdata.get('LABEL', '').upper() == 'CLOUDMD' + ] + for device in devices: + try: + rbx_data = util.mount_cb( + device=device, + callback=read_user_data_callback, + mtype=['vfat', 'fat'] + ) + if rbx_data: + break + except OSError as err: + if err.errno != errno.ENOENT: + raise + except util.MountFailedError: + util.logexc(LOG, "Failed to mount %s when looking for user " + "data", device) + if not rbx_data: + util.logexc(LOG, "Failed to load metadata and userdata") + return False + return rbx_data + + +def generate_network_config(netadps): + """Generate network configuration + + @param netadps: A list of network adapter settings + + @returns: A dict containing network config + """ + return { + 'version': 1, + 'config': [ + { + 'type': 'physical', + 'name': 'eth{}'.format(str(i)), + 'mac_address': netadp['macaddress'].lower(), + 'subnets': [ + { + 'type': 'static', + 'address': ip['address'], + 'netmask': netadp['network']['netmask'], + 'control': 'auto', + 'gateway': netadp['network']['gateway'], + 'dns_nameservers': netadp['network']['dns'][ + 'nameservers'] + } for ip in netadp['ip'] + ], + } for i, netadp in enumerate(netadps) + ] + } + + +def read_user_data_callback(mount_dir): + """This callback will be applied by util.mount_cb() on the mounted + drive. + + @param mount_dir: String representing path of directory where mounted drive + is available + + @returns: A dict containing userdata, metadata and cfg based on metadata. + """ + meta_data = util.load_json( + text=util.load_file( + fname=os.path.join(mount_dir, 'cloud.json'), + decode=False + ) + ) + user_data = util.load_file( + fname=os.path.join(mount_dir, 'user.data'), + quiet=True + ) + if 'vm' not in meta_data or 'netadp' not in meta_data: + util.logexc(LOG, "Failed to load metadata. 
Invalid format.") + return None + username = meta_data.get('additionalMetadata', {}).get('username') + ssh_keys = meta_data.get('additionalMetadata', {}).get('sshKeys', []) + + hash = None + if meta_data.get('additionalMetadata', {}).get('password'): + hash = meta_data['additionalMetadata']['password']['sha512'] + + network = generate_network_config(meta_data['netadp']) + + data = { + 'userdata': user_data, + 'metadata': { + 'instance-id': meta_data['vm']['_id'], + 'local-hostname': meta_data['vm']['name'], + 'public-keys': [] + }, + 'gratuitous_arp': [ + { + "source": ip["address"], + "destination": target + } + for netadp in meta_data['netadp'] + for ip in netadp['ip'] + for target in [ + netadp['network']["gateway"], + int2ip(ip2int(netadp['network']["gateway"]) + 2), + int2ip(ip2int(netadp['network']["gateway"]) + 3) + ] + ], + 'cfg': { + 'ssh_pwauth': True, + 'disable_root': True, + 'system_info': { + 'default_user': { + 'name': username, + 'gecos': username, + 'sudo': ['ALL=(ALL) NOPASSWD:ALL'], + 'passwd': hash, + 'lock_passwd': False, + 'ssh_authorized_keys': ssh_keys, + 'shell': '/bin/bash' + } + }, + 'network_config': network, + 'manage_etc_hosts': get_manage_etc_hosts(), + }, + } + + LOG.debug('returning DATA object:') + LOG.debug(data) + + return data + + +class DataSourceRbxCloud(sources.DataSource): + update_events = {'network': [ + EventType.BOOT_NEW_INSTANCE, + EventType.BOOT + ]} + + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.seed = None + + def __str__(self): + root = sources.DataSource.__str__(self) + return "%s [seed=%s]" % (root, self.seed) + + def _get_data(self): + """ + Metadata is passed to the launching instance which + is used to perform instance configuration. + """ + rbx_data = get_md() + self.userdata_raw = rbx_data['userdata'] + self.metadata = rbx_data['metadata'] + self.gratuitous_arp = rbx_data['gratuitous_arp'] + self.cfg = rbx_data['cfg'] + return True + + @property + def network_config(self): + return self.cfg['network_config'] + + def get_public_ssh_keys(self): + return self.metadata['public-keys'] + + def get_userdata_raw(self): + return self.userdata_raw + + def get_config_obj(self): + return self.cfg + + def activate(self, cfg, is_new_instance): + gratuitous_arp(self.gratuitous_arp, self.distro) + + +# Used to match classes to dependencies +datasources = [ + (DataSourceRbxCloud, (sources.DEP_FILESYSTEM,)), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/doc/rtd/topics/datasources/rbxcloud.rst b/doc/rtd/topics/datasources/rbxcloud.rst new file mode 100644 index 00000000..3d465bed --- /dev/null +++ b/doc/rtd/topics/datasources/rbxcloud.rst @@ -0,0 +1,25 @@ +.. _datasource_config_drive: + +Rbx Cloud +========= + +The Rbx datasource consumes the metadata drive available on platform +`HyperOne`_ and `Rootbox`_ platform. + +Datasource supports, in particular, network configurations, hostname, +user accounts and user metadata. + +Metadata drive +-------------- + +Drive metadata is a `FAT`_-formatted partition with the ```CLOUDMD``` label on +the system disk. Its contents are refreshed each time the virtual machine +is restarted, if the partition exists. For more information see +`HyperOne docs`_. + +.. _HyperOne: http://www.hyperone.com/ +.. _Rootbox: https://rootbox.com/ +.. _HyperOne Virtual Machine docs: http://www.hyperone.com/ +.. 
_vfat: https://en.wikipedia.org/wiki/File_Allocation_Table + +.. vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_rbx.py b/tests/unittests/test_datasource/test_rbx.py new file mode 100644 index 00000000..aabf1f18 --- /dev/null +++ b/tests/unittests/test_datasource/test_rbx.py @@ -0,0 +1,208 @@ +import json + +from cloudinit import helpers +from cloudinit import distros +from cloudinit.sources import DataSourceRbxCloud as ds +from cloudinit.tests.helpers import mock, CiTestCase, populate_dir + +DS_PATH = "cloudinit.sources.DataSourceRbxCloud" + +CRYPTO_PASS = "$6$uktth46t$FvpDzFD2iL9YNZIG1Epz7957hJqbH0f" \ + "QKhnzcfBcUhEodGAWRqTy7tYG4nEW7SUOYBjxOSFIQW5" \ + "tToyGP41.s1" + +CLOUD_METADATA = { + "vm": { + "memory": 4, + "cpu": 2, + "name": "vm-image-builder", + "_id": "5beab44f680cffd11f0e60fc" + }, + "additionalMetadata": { + "username": "guru", + "sshKeys": ["ssh-rsa ..."], + "password": { + "sha512": CRYPTO_PASS + } + }, + "disk": [ + {"size": 10, "type": "ssd", + "name": "vm-image-builder-os", + "_id": "5beab450680cffd11f0e60fe"}, + {"size": 2, "type": "ssd", + "name": "ubuntu-1804-bionic", + "_id": "5bef002c680cffd11f107590"} + ], + "netadp": [ + { + "ip": [{"address": "62.181.8.174"}], + "network": { + "dns": {"nameservers": ["8.8.8.8", "8.8.4.4"]}, + "routing": [], + "gateway": "62.181.8.1", + "netmask": "255.255.248.0", + "name": "public", + "type": "public", + "_id": "5784e97be2627505227b578c" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:03", + "_id": "5beab450680cffd11f0e6102" + }, + { + "ip": [{"address": "10.209.78.11"}], + "network": { + "dns": {"nameservers": ["9.9.9.9", "8.8.8.8"]}, + "routing": [], + "gateway": "10.209.78.1", + "netmask": "255.255.255.0", + "name": "network-determined-bardeen", + "type": "private", + "_id": "5beaec64680cffd11f0e7c31" + }, + "speed": 1000, + "type": "hv", + "macaddress": "00:15:5D:FF:0F:24", + "_id": "5bec18c6680cffd11f0f0d8b" + } + ], + "dvddrive": [{"iso": {}}] +} + + +class TestRbxDataSource(CiTestCase): + parsed_user = None + allowed_subp = ['bash'] + + def _fetch_distro(self, kind): + cls = distros.fetch(kind) + paths = helpers.Paths({}) + return cls(kind, {}, paths) + + def setUp(self): + super(TestRbxDataSource, self).setUp() + self.tmp = self.tmp_dir() + self.paths = helpers.Paths( + {'cloud_dir': self.tmp, 'run_dir': self.tmp} + ) + + # defaults for few tests + self.ds = ds.DataSourceRbxCloud + self.seed_dir = self.paths.seed_dir + self.sys_cfg = {'datasource': {'RbxCloud': {'dsmode': 'local'}}} + + def test_seed_read_user_data_callback_empty_file(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, {}) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertIsNone(results) + + def test_seed_read_user_data_callback_valid_disk(self): + populate_user_metadata(self.seed_dir, '') + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertTrue('metadata' in results) + self.assertTrue('cfg' in results) + + def test_seed_read_user_data_callback_userdata(self): + userdata = "#!/bin/sh\nexit 1" + populate_user_metadata(self.seed_dir, userdata) + populate_cloud_metadata(self.seed_dir, CLOUD_METADATA) + + results = ds.read_user_data_callback(self.seed_dir) + + self.assertNotEqual(results, None) + self.assertTrue('userdata' in results) + self.assertEqual(results['userdata'], userdata) + + def 
test_generate_network_config(self): + expected = { + 'version': 1, + 'config': [ + { + 'subnets': [ + {'control': 'auto', + 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], + 'netmask': '255.255.248.0', + 'address': '62.181.8.174', + 'type': 'static', 'gateway': '62.181.8.1'} + ], + 'type': 'physical', + 'name': 'eth0', + 'mac_address': '00:15:5d:ff:0f:03' + }, + { + 'subnets': [ + {'control': 'auto', + 'dns_nameservers': ['9.9.9.9', '8.8.8.8'], + 'netmask': '255.255.255.0', + 'address': '10.209.78.11', + 'type': 'static', + 'gateway': '10.209.78.1'} + ], + 'type': 'physical', + 'name': 'eth1', + 'mac_address': '00:15:5d:ff:0f:24' + } + ] + } + self.assertTrue( + ds.generate_network_config(CLOUD_METADATA['netadp']), + expected + ) + + @mock.patch(DS_PATH + '.util.subp') + def test_gratuitous_arp_run_standard_arping(self, m_subp): + """Test handle run arping & parameters.""" + items = [ + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104' + }, + { + 'destination': '172.17.0.2', + 'source': '172.16.6.104', + }, + ] + ds.gratuitous_arp(items, self._fetch_distro('ubuntu')) + self.assertEqual([ + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]), + mock.call([ + 'arping', '-c', '2', '-S', + '172.16.6.104', '172.17.0.2' + ]) + ], m_subp.call_args_list + ) + + @mock.patch(DS_PATH + '.util.subp') + def test_handle_rhel_like_arping(self, m_subp): + """Test handle on RHEL-like distros.""" + items = [ + { + 'source': '172.16.6.104', + 'destination': '172.17.0.2', + } + ] + ds.gratuitous_arp(items, self._fetch_distro('fedora')) + self.assertEqual([ + mock.call( + ['arping', '-c', '2', '-s', '172.16.6.104', '172.17.0.2'] + )], + m_subp.call_args_list + ) + + +def populate_cloud_metadata(path, data): + populate_dir(path, {'cloud.json': json.dumps(data)}) + + +def populate_user_metadata(path, data): + populate_dir(path, {'user.data': data}) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 7aeeb91c..c5b5c46c 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -267,10 +267,13 @@ class TestDsIdentify(DsIdentifyBase): """ConfigDrive datasource has a disk with LABEL=config-2.""" self._test_ds_found('ConfigDrive') + def test_rbx_cloud(self): + """Rbx datasource has a disk with LABEL=CLOUDMD.""" + self._test_ds_found('RbxCloud') + def test_config_drive_upper(self): """ConfigDrive datasource has a disk with LABEL=CONFIG-2.""" self._test_ds_found('ConfigDriveUpper') - return def test_config_drive_seed(self): """Config Drive seed directory.""" @@ -896,6 +899,18 @@ VALID_CFG = { os.path.join(P_SEED_DIR, 'config_drive', 'openstack', 'latest', 'meta_data.json'): 'md\n'}, }, + 'RbxCloud': { + 'ds': 'RbxCloud', + 'mocks': [ + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'vda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, + {'DEVNAME': 'vdb', 'TYPE': 'vfat', 'LABEL': 'CLOUDMD'}] + )}, + ], + }, 'Hetzner': { 'ds': 'Hetzner', 'files': {P_SYS_VENDOR: 'Hetzner\n'}, diff --git a/tools/ds-identify b/tools/ds-identify index f76f2a6e..40fc0604 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -124,7 +124,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. 
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale" +OVF SmartOS Scaleway Hetzner IBMCloud Oracle Exoscale RbxCloud" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -702,6 +702,11 @@ dscheck_OpenNebula() { return ${DS_NOT_FOUND} } +dscheck_RbxCloud() { + has_fs_with_label "CLOUDMD" "cloudmd" && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + ovf_vmware_guest_customization() { # vmware guest customization -- cgit v1.2.3 From 7d5d34f3643a2108d667759f57a5ab63d0affadd Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 11 Oct 2019 14:54:49 +0000 Subject: Add Support for e24cloud to Ec2 datasource. e24cloud provides an EC2 compatible datasource. This just identifies their platform based on dmi 'system-vendor' having 'e24cloud'. https://www.e24cloud.com/en/ . Updated chassis typo in zstack unit test docstring. LP: #1696476 --- cloudinit/apport.py | 1 + cloudinit/sources/DataSourceEc2.py | 12 +++++++++++- doc/rtd/topics/datasources.rst | 3 ++- doc/rtd/topics/datasources/e24cloud.rst | 9 +++++++++ tests/unittests/test_datasource/test_ec2.py | 15 ++++++++++++++- tests/unittests/test_ds_identify.py | 18 +++++++++++++++++- tools/ds-identify | 5 +++++ 7 files changed, 59 insertions(+), 4 deletions(-) create mode 100644 doc/rtd/topics/datasources/e24cloud.rst (limited to 'tools') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index fde1f75b..c6797f12 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -22,6 +22,7 @@ KNOWN_CLOUD_NAMES = [ 'CloudSigma', 'CloudStack', 'DigitalOcean', + 'E24Cloud', 'GCE - Google Compute Engine', 'Exoscale', 'Hetzner Cloud', diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 6c72ace2..1d88c9b1 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -34,6 +34,7 @@ class CloudNames(object): AWS = "aws" BRIGHTBOX = "brightbox" ZSTACK = "zstack" + E24CLOUD = "e24cloud" # UNKNOWN indicates no positive id. If strict_id is 'warn' or 'false', # then an attempt at the Ec2 Metadata service will be made. UNKNOWN = "unknown" @@ -483,11 +484,16 @@ def identify_zstack(data): return CloudNames.ZSTACK +def identify_e24cloud(data): + if data['vendor'] == 'e24cloud': + return CloudNames.E24CLOUD + + def identify_platform(): # identify the platform and return an entry in CloudNames. data = _collect_platform_data() checks = (identify_aws, identify_brightbox, identify_zstack, - lambda x: CloudNames.UNKNOWN) + identify_e24cloud, lambda x: CloudNames.UNKNOWN) for checker in checks: try: result = checker(data) @@ -506,6 +512,7 @@ def _collect_platform_data(): uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' serial: dmi 'system-serial-number' (/sys/.../product_serial) asset_tag: 'dmidecode -s chassis-asset-tag' + vendor: dmi 'system-manufacturer' (/sys/.../sys_vendor) On Ec2 instances experimentation is that product_serial is upper case, and product_uuid is lower case. This returns lower case values for both. 
@@ -534,6 +541,9 @@ def _collect_platform_data(): data['asset_tag'] = asset_tag.lower() + vendor = util.read_dmi_data('system-manufacturer') + data['vendor'] = (vendor if vendor else '').lower() + return data diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index a337c08c..70fbe07d 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -29,8 +29,9 @@ The following is a list of documents for each supported datasource: datasources/aliyun.rst datasources/altcloud.rst - datasources/ec2.rst datasources/azure.rst + datasources/ec2.rst + datasources/e24cloud.rst datasources/cloudsigma.rst datasources/cloudstack.rst datasources/configdrive.rst diff --git a/doc/rtd/topics/datasources/e24cloud.rst b/doc/rtd/topics/datasources/e24cloud.rst new file mode 100644 index 00000000..de9a4127 --- /dev/null +++ b/doc/rtd/topics/datasources/e24cloud.rst @@ -0,0 +1,9 @@ +.. _datasource_e24cloud: + +E24Cloud +======== +`E24Cloud ` platform provides an AWS Ec2 metadata +service clone. It identifies itself to guests using the dmi +system-manufacturer (/sys/class/dmi/id/sys_vendor). + +.. vi: textwidth=78 diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py index 6fabf258..5e1dd777 100644 --- a/tests/unittests/test_datasource/test_ec2.py +++ b/tests/unittests/test_datasource/test_ec2.py @@ -672,13 +672,14 @@ class TesIdentifyPlatform(test_helpers.CiTestCase): 'serial': 'H23-C4J3JV-R6', 'uuid': '81c7e555-6471-4833-9551-1ab366c4cfd2', 'uuid_source': 'dmi', + 'vendor': 'tothecloud', } unspecial.update(**kwargs) return unspecial @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') def test_identify_zstack(self, m_collect): - """zstack should be identified if cassis-asset-tag ends in .zstack.io + """zstack should be identified if chassis-asset-tag ends in .zstack.io """ m_collect.return_value = self.collmock(asset_tag='123456.zstack.io') self.assertEqual(ec2.CloudNames.ZSTACK, ec2.identify_platform()) @@ -690,4 +691,16 @@ class TesIdentifyPlatform(test_helpers.CiTestCase): m_collect.return_value = self.collmock(asset_tag='123456.buzzstack.io') self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_e24cloud(self, m_collect): + """e24cloud identified if vendor is e24cloud""" + m_collect.return_value = self.collmock(vendor='e24cloud') + self.assertEqual(ec2.CloudNames.E24CLOUD, ec2.identify_platform()) + + @mock.patch('cloudinit.sources.DataSourceEc2._collect_platform_data') + def test_identify_e24cloud_negative(self, m_collect): + """e24cloud identified if vendor is e24cloud""" + m_collect.return_value = self.collmock(vendor='e24cloudyday') + self.assertEqual(ec2.CloudNames.UNKNOWN, ec2.identify_platform()) + # vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index c5b5c46c..12c6ae36 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -616,6 +616,14 @@ class TestDsIdentify(DsIdentifyBase): """EC2: chassis asset tag ends with 'zstack.io'""" self._test_ds_found('Ec2-ZStack') + def test_e24cloud_is_ec2(self): + """EC2: e24cloud identified by sys_vendor""" + self._test_ds_found('Ec2-E24Cloud') + + def test_e24cloud_not_active(self): + """EC2: bobrightbox.com in product_serial is not brightbox'""" + self._test_ds_not_found('Ec2-E24Cloud-negative') + class TestIsIBMProvisioning(DsIdentifyBase): """Test the 
is_ibm_provisioning method in ds-identify.""" @@ -994,7 +1002,15 @@ VALID_CFG = { 'Ec2-ZStack': { 'ds': 'Ec2', 'files': {P_CHASSIS_ASSET_TAG: '123456.zstack.io\n'}, - } + }, + 'Ec2-E24Cloud': { + 'ds': 'Ec2', + 'files': {P_SYS_VENDOR: 'e24cloud\n'}, + }, + 'Ec2-E24Cloud-negative': { + 'ds': 'Ec2', + 'files': {P_SYS_VENDOR: 'e24cloudyday\n'}, + } } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index 40fc0604..20a99ee9 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -905,6 +905,11 @@ ec2_identify_platform() { *.zstack.io) _RET="ZStack"; return 0;; esac + local vendor="${DI_DMI_SYS_VENDOR}" + case "$vendor" in + e24cloud) _RET="E24cloud"; return 0;; + esac + # AWS http://docs.aws.amazon.com/AWSEC2/ # latest/UserGuide/identify_ec2_instances.html local uuid="" hvuuid="${PATH_SYS_HYPERVISOR}/uuid" -- cgit v1.2.3 From d8ff0e3aa7db0b478bd52ab16cf38cd661412446 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 6 Nov 2019 23:30:54 +0000 Subject: tools: add migrate-lp-user-to-github script to link LP to github To link a launchpad account name to your github account for licensing accountability, each LP user should publish a merge proposal in launchpad with their LP account and a matching merge proposal in github using their github user. Cloud-init will track these username maps in ./tools/.lp-to-git-user as JSON. Run ./tools/migrate-lp-user-to-github to automatically create merge proposals in launchpad and your github account. --- tools/.lp-to-git-user | 1 + tools/migrate-lp-user-to-github | 229 ++++++++++++++++++++++++++++++++++ 2 files changed, 230 insertions(+) create mode 100644 tools/.lp-to-git-user create mode 100755 tools/migrate-lp-user-to-github (limited to 'tools') diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user new file mode 100644 index 00000000..0967ef42 --- /dev/null +++ b/tools/.lp-to-git-user @@ -0,0 +1 @@ +{} diff --git a/tools/migrate-lp-user-to-github b/tools/migrate-lp-user-to-github new file mode 100755 index 00000000..6b095a14 --- /dev/null +++ b/tools/migrate-lp-user-to-github @@ -0,0 +1,229 @@ +#!/usr/bin/python3 +"""Link your Launchpad user to github, proposing branches to LP and Github""" + +from argparse import ArgumentParser +from subprocess import Popen, PIPE +import os +import sys + +try: + from launchpadlib.launchpad import Launchpad +except ImportError: + print("Missing python launchpadlib dependency to create branches for you. "
+ "Install with: sudo apt-get install python3-launchpadlib" ) + sys.exit(1) + +if "avoid-pep8-E402-import-not-top-of-file": + _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) + sys.path.insert(0, _tdir) + from cloudinit import util + + +DRYRUN = False +LP_TO_GIT_USER_FILE='.lp-to-git-user' +MIGRATE_BRANCH_NAME='migrate-lp-to-github' +GITHUB_PULL_URL='https://github.com/canonical/cloud-init/compare/master...{github_user}:{branch}' +GH_UPSTREAM_URL='https://github.com/canonical/cloud-init' + + +def error(message): + if isinstance(message, bytes): + message = message.decode('utf-8') + log('ERROR: {error}'.format(error=message)) + sys.exit(1) + + +def log(message): + print(message) + + +def subp(cmd, skip=False): + prefix = 'SKIPPED: ' if skip else '$ ' + log('{prefix}{command}'.format(prefix=prefix, command=' '.join(cmd))) + if skip: + return + proc = Popen(cmd, stdout=PIPE, stderr=PIPE) + out, err = proc.communicate() + if proc.returncode: + error(err if err else out) + return out.decode('utf-8') + + +LP_GIT_PATH_TMPL = 'git+ssh://{launchpad_user}@git.launchpad.net/' +LP_UPSTREAM_PATH_TMPL = LP_GIT_PATH_TMPL + 'cloud-init' +LP_REMOTE_PATH_TMPL = LP_GIT_PATH_TMPL + '~{launchpad_user}/cloud-init' +GITHUB_REMOTE_PATH_TMPL = 'git@github.com:{github_user}/cloud-init.git' + + +# Comment templates +COMMIT_MSG_TMPL = ''' +lp-to-git-users: adding {gh_username} + +Mapped from {lp_username} +''' +PUBLISH_DIR='/tmp/cloud-init-lp-to-github-migration' + +def get_parser(): + parser = ArgumentParser(description=__doc__) + parser.add_argument( + '--dryrun', required=False, default=False, action='store_true', + help=('Run commands and review operation in dryrun mode, ' + 'making not changes.')) + parser.add_argument('launchpad_user', help='Your launchpad username.') + parser.add_argument('github_user', help='Your github username.') + parser.add_argument( + '--local-repo-dir', required=False, dest='repo_dir', + help=('The name of the local directory into which we clone.' + ' Default: {}'.format(PUBLISH_DIR))) + parser.add_argument( + '--upstream-branch', required=False, dest='upstream', + default='origin/master', + help=('The name of remote branch target into which we will merge.' + ' Default: origin/master')) + parser.add_argument( + '-v', '--verbose', required=False, default=False, action='store_true', + help=('Print all actions.')) + parser.add_argument( + '--push-remote', required=False, dest='pushremote', + help=('QA-only provide remote name into which you want to push')) + return parser + + +def create_publish_branch(upstream, publish_branch): + '''Create clean publish branch target in the current git repo.''' + branches = subp(['git', 'branch']) + upstream_remote, upstream_branch = upstream.split('/', 1) + subp(['git', 'checkout', upstream_branch]) + subp(['git', 'pull']) + if publish_branch in branches: + subp(['git', 'branch', '-D', publish_branch]) + subp(['git', 'checkout', upstream, '-b', publish_branch]) + + +def add_lp_and_github_remotes(lp_user, gh_user): + """Add lp and github remotes if not present. 
+ + @return Tuple with (lp_remote_name, gh_remote_name) + """ + lp_remote = LP_REMOTE_PATH_TMPL.format(launchpad_user=lp_user) + gh_remote = GITHUB_REMOTE_PATH_TMPL.format(github_user=gh_user) + remotes = subp(['git', 'remote', '-v']) + lp_remote_name = gh_remote_name = None + for remote in remotes.splitlines(): + if not remote: + continue + remote_name, remote_url, _operation = remote.split() + if lp_remote == remote_url: + lp_remote_name = remote_name + elif gh_remote == remote_url: + gh_remote_name = remote_name + if not lp_remote_name: + log("launchpad: Creating git remote launchpad-{} to point at your" + " LP repo".format(lp_user)) + lp_remote_name = 'launchpad-{}'.format(lp_user) + subp(['git', 'remote', 'add', lp_remote_name, lp_remote]) + subp(['git', 'fetch', lp_remote_name]) + if not gh_remote_name: + log("github: Creating git remote github-{} to point at your" + " GH repo".format(gh_user)) + gh_remote_name = 'github-{}'.format(gh_user) + subp(['git', 'remote', 'add', gh_remote_name, gh_remote]) + try: + subp(['git', 'fetch', gh_remote_name]) + except: + log("ERROR: [github] Could not fetch remote '{remote}'." + "Please create a fork for your github user by clicking 'Fork'" + " from {gh_upstream}".format( + remote=gh_remote, gh_upstream=GH_UPSTREAM_URL)) + sys.exit(1) + return (lp_remote_name, gh_remote_name) + + +def create_migration_branch( + branch_name, upstream, lp_user, gh_user, commit_msg): + """Create an LP to Github migration branch and add lp_user->gh_user.""" + log("Creating a migration branch: {} adding your users".format( + MIGRATE_BRANCH_NAME)) + create_publish_branch(upstream, MIGRATE_BRANCH_NAME) + lp_to_git_map = {} + lp_to_git_file = os.path.join(os.getcwd(), LP_TO_GIT_USER_FILE) + if os.path.exists(lp_to_git_file): + with open(lp_to_git_file) as stream: + lp_to_git_map = util.load_json(stream.read()) + if gh_user in lp_to_git_map.values(): + raise RuntimeError( + "github user '{}' already in {}".format(gh_user, lp_to_git_file)) + if lp_user in lp_to_git_map: + raise RuntimeError( + "launchpad user '{}' already in {}".format( + lp_user, lp_to_git_file)) + lp_to_git_map[lp_user] = gh_user + with open(lp_to_git_file, 'w') as stream: + stream.write(util.json_dumps(lp_to_git_map)) + subp(['git', 'add', lp_to_git_file]) + commit_file = os.path.join(os.path.dirname(os.getcwd()), 'commit.msg') + with open(commit_file, 'wb') as stream: + stream.write(commit_msg.encode('utf-8')) + subp(['git', 'commit', '--all', '-F', commit_file]) + + +def main(): + global DRYRUN + global VERBOSITY + parser = get_parser() + args = parser.parse_args() + DRYRUN = args.dryrun + VERBOSITY = 1 if args.verbose else 0 + repo_dir = args.repo_dir or PUBLISH_DIR + if not os.path.exists(repo_dir): + subp(['git', 'clone', + LP_UPSTREAM_PATH_TMPL.format(launchpad_user=args.launchpad_user), + repo_dir]) + os.chdir(repo_dir) + log("Sycing master branch with upstream") + subp(['git', 'checkout', 'master']) + subp(['git', 'pull']) + lp_remote_name, gh_remote_name = add_lp_and_github_remotes( + args.launchpad_user, args.github_user) + commit_msg = COMMIT_MSG_TMPL.format( + gh_username=args.github_user, lp_username=args.launchpad_user) + create_migration_branch( + MIGRATE_BRANCH_NAME, args.upstream, args.launchpad_user, + args.github_user, commit_msg) + + for push_remote in (lp_remote_name, gh_remote_name): + subp(['git', 'push', push_remote, MIGRATE_BRANCH_NAME, '--force']) + + # Make merge request on LP + log("[launchpad] Automatically creating merge proposal using launchpadlib") + lp = 
Launchpad.login_with( + "server-team github-migration tool", 'production', version='devel') + master = lp.git_repositories.getByPath( + path='cloud-init').getRefByPath(path='master') + LP_BRANCH_PATH='~{launchpad_user}/cloud-init/+git/cloud-init' + lp_git_repo = lp.git_repositories.getByPath( + path=LP_BRANCH_PATH.format(launchpad_user=args.launchpad_user)) + lp_user_migrate_branch = lp_git_repo.getRefByPath( + path='refs/heads/migrate-lp-to-github') + lp_merge_url = ( + 'https://code.launchpad.net/' + + LP_BRANCH_PATH.format(launchpad_user=args.launchpad_user) + + '/+ref/' + MIGRATE_BRANCH_NAME) + try: + lp_user_migrate_branch.createMergeProposal( + commit_message=commit_msg, merge_target=master, needs_review=True) + except Exception as e: + log('[launchpad] active merge proposal already exists at:\n' + '{url}\n'.format(url=lp_merge_url)) + else: + log("[launchpad] Merge proposal created at:\n{url}.\n".format( + url=lp_merge_url)) + log("To link your account to github open your browser and" + " click 'Create pull request' at the following URL:\n" + "{url}".format(url=GITHUB_PULL_URL.format( + github_user=args.github_user, branch=MIGRATE_BRANCH_NAME))) + return 0 + + +if __name__ == '__main__': + sys.exit(main()) -- cgit v1.2.3 From 14451d468cc80d89a80f0fbab7d8e11f3ac467e7 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 7 Nov 2019 16:17:46 +0000 Subject: tools: migrate script needs to write tools/.lp-to-git-user Also fix commit message lint --- tools/migrate-lp-user-to-github | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'tools') diff --git a/tools/migrate-lp-user-to-github b/tools/migrate-lp-user-to-github index 6b095a14..f012d312 100755 --- a/tools/migrate-lp-user-to-github +++ b/tools/migrate-lp-user-to-github @@ -56,7 +56,7 @@ GITHUB_REMOTE_PATH_TMPL = 'git@github.com:{github_user}/cloud-init.git' # Comment templates -COMMIT_MSG_TMPL = ''' +COMMIT_MSG_TMPL = '''\ lp-to-git-users: adding {gh_username} Mapped from {lp_username} @@ -146,7 +146,7 @@ def create_migration_branch( MIGRATE_BRANCH_NAME)) create_publish_branch(upstream, MIGRATE_BRANCH_NAME) lp_to_git_map = {} - lp_to_git_file = os.path.join(os.getcwd(), LP_TO_GIT_USER_FILE) + lp_to_git_file = os.path.join(os.getcwd(), 'tools', LP_TO_GIT_USER_FILE) if os.path.exists(lp_to_git_file): with open(lp_to_git_file) as stream: lp_to_git_map = util.load_json(stream.read()) -- cgit v1.2.3 From 0be6a8c9cd41e1d533d4b20d774f3b74fd7f6143 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Thu, 7 Nov 2019 15:15:22 -0700 Subject: lp-to-git-users: adding raharper Mapped from raharper --- tools/.lp-to-git-user | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user index 0967ef42..b59891a1 100644 --- a/tools/.lp-to-git-user +++ b/tools/.lp-to-git-user @@ -1 +1,3 @@ -{} +{ + "raharper": "raharper" +} \ No newline at end of file -- cgit v1.2.3 From 880d1e8d428e6f4ab4ccf5c4ec1a4c9b41c90172 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Thu, 7 Nov 2019 15:27:47 -0700 Subject: lp-to-git-users: adding blackboxsw Mapped from chad.smith --- tools/.lp-to-git-user | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user index b59891a1..f7d3dbbc 100644 --- a/tools/.lp-to-git-user +++ b/tools/.lp-to-git-user @@ -1,3 +1,4 @@ { + "chad.smith": "blackboxsw", "raharper": "raharper" } \ No newline at end of file -- cgit v1.2.3 From ad23f9af4164d920ad1306b7a1eb5785511b7b7e Mon Sep 17 00:00:00 2001 
From: Joshua Powers Date: Thu, 7 Nov 2019 15:59:47 -0700 Subject: lp-to-git-users: adding powersj Mapped from powersj --- tools/.lp-to-git-user | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user index f7d3dbbc..9533708f 100644 --- a/tools/.lp-to-git-user +++ b/tools/.lp-to-git-user @@ -1,4 +1,5 @@ { "chad.smith": "blackboxsw", + "powersj": "powersj", "raharper": "raharper" } \ No newline at end of file -- cgit v1.2.3 From 8ae0a7030e5e825dd4179761a62fa7f8670df101 Mon Sep 17 00:00:00 2001 From: Paride Legovini Date: Fri, 8 Nov 2019 10:37:32 -0700 Subject: lp-to-git-users: adding paride Mapped from legovini --- tools/.lp-to-git-user | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user index 9533708f..04c7a9eb 100644 --- a/tools/.lp-to-git-user +++ b/tools/.lp-to-git-user @@ -1,5 +1,6 @@ { "chad.smith": "blackboxsw", + "legovini": "paride", "powersj": "powersj", "raharper": "raharper" } \ No newline at end of file -- cgit v1.2.3 From fdbca300529a20bedacb16c929cece9697eb89bc Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Sun, 10 Nov 2019 23:36:34 -0600 Subject: pycodestyle: remove unused local variable --- tools/migrate-lp-user-to-github | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/migrate-lp-user-to-github b/tools/migrate-lp-user-to-github index f012d312..6a498072 100755 --- a/tools/migrate-lp-user-to-github +++ b/tools/migrate-lp-user-to-github @@ -212,7 +212,7 @@ def main(): try: lp_user_migrate_branch.createMergeProposal( commit_message=commit_msg, merge_target=master, needs_review=True) - except Exception as e: + except Exception: log('[launchpad] active merge proposal already exists at:\n' '{url}\n'.format(url=lp_merge_url)) else: -- cgit v1.2.3 From a6d6bf978df971f3353ce617cbfe5bcdf57d2fe8 Mon Sep 17 00:00:00 2001 From: Lars Kellogg-Stedman Date: Tue, 12 Nov 2019 13:35:04 -0700 Subject: lp-to-git-users: adding larsks Mapped from larsks --- tools/.lp-to-git-user | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user index 04c7a9eb..65e4d631 100644 --- a/tools/.lp-to-git-user +++ b/tools/.lp-to-git-user @@ -1,5 +1,6 @@ { "chad.smith": "blackboxsw", + "larsks": "larsks", "legovini": "paride", "powersj": "powersj", "raharper": "raharper" -- cgit v1.2.3 From 5578305e93b445101ae9f7aa7007657f9a79704b Mon Sep 17 00:00:00 2001 From: Chris Glass Date: Tue, 12 Nov 2019 13:36:09 -0700 Subject: lp-to-git-users: adding chrisglass Mapped from tribaal --- tools/.lp-to-git-user | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user index 65e4d631..a15c991d 100644 --- a/tools/.lp-to-git-user +++ b/tools/.lp-to-git-user @@ -3,5 +3,6 @@ "larsks": "larsks", "legovini": "paride", "powersj": "powersj", - "raharper": "raharper" + "raharper": "raharper", + "tribaal": "chrisglass" } \ No newline at end of file -- cgit v1.2.3 From ddbd030b7b144392c1dfcc39ed636d5796ebc75f Mon Sep 17 00:00:00 2001 From: Igor Galić Date: Wed, 13 Nov 2019 09:50:31 -0600 Subject: lp-to-git-users: adding igalic Mapped from i.galic --- tools/.lp-to-git-user | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user index a15c991d..eba1cd8e 100644 --- a/tools/.lp-to-git-user +++ b/tools/.lp-to-git-user @@ -1,5 +1,6 @@ { "chad.smith": "blackboxsw", + "i.galic": 
"igalic", "larsks": "larsks", "legovini": "paride", "powersj": "powersj", -- cgit v1.2.3 From 3baabe76a70b28abeee2da77826a35e27cf9019a Mon Sep 17 00:00:00 2001 From: Harald Jensås Date: Fri, 15 Nov 2019 08:44:23 -0700 Subject: lp-to-git-users: adding hjensas Mapped from harald-jensas --- tools/.lp-to-git-user | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user index eba1cd8e..0ab1d858 100644 --- a/tools/.lp-to-git-user +++ b/tools/.lp-to-git-user @@ -1,5 +1,6 @@ { "chad.smith": "blackboxsw", + "harald-jensas": "hjensas", "i.galic": "igalic", "larsks": "larsks", "legovini": "paride", -- cgit v1.2.3 From 6bff24019d3fabe3fc539334e1f23547e69caf56 Mon Sep 17 00:00:00 2001 From: Thomas Stringer Date: Wed, 20 Nov 2019 10:45:02 -0600 Subject: lp-to-git-users: adding trstringer Mapped from trstringer --- tools/.lp-to-git-user | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'tools') diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user index 0ab1d858..2d040af2 100644 --- a/tools/.lp-to-git-user +++ b/tools/.lp-to-git-user @@ -6,5 +6,6 @@ "legovini": "paride", "powersj": "powersj", "raharper": "raharper", - "tribaal": "chrisglass" + "tribaal": "chrisglass", + "trstringer": "trstringer" } \ No newline at end of file -- cgit v1.2.3 From 8db5ee8146d1277f1f6c7d7270a31ffdd8acc971 Mon Sep 17 00:00:00 2001 From: Pengpeng Sun Date: Wed, 20 Nov 2019 10:48:14 -0600 Subject: lp-to-git-users: adding PengpengSun Mapped from pengpengs --- tools/.lp-to-git-user | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user index 2d040af2..e418d7fd 100644 --- a/tools/.lp-to-git-user +++ b/tools/.lp-to-git-user @@ -4,6 +4,7 @@ "i.galic": "igalic", "larsks": "larsks", "legovini": "paride", + "pengpengs": "PengpengSun", "powersj": "powersj", "raharper": "raharper", "tribaal": "chrisglass", -- cgit v1.2.3 From aa935aefd2a01e792a397a28a915f0e029aeaed6 Mon Sep 17 00:00:00 2001 From: Dominic Schlegel Date: Mon, 25 Nov 2019 13:38:00 -0700 Subject: lp-to-git-users: adding do3meli Mapped from d-info-e --- tools/.lp-to-git-user | 1 + 1 file changed, 1 insertion(+) (limited to 'tools') diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user index e418d7fd..4140942f 100644 --- a/tools/.lp-to-git-user +++ b/tools/.lp-to-git-user @@ -1,5 +1,6 @@ { "chad.smith": "blackboxsw", + "d-info-e": "do3meli", "harald-jensas": "hjensas", "i.galic": "igalic", "larsks": "larsks", -- cgit v1.2.3 From 250a3f92473feeb2689f3a214e8f1b79fa419334 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Mon, 25 Nov 2019 17:03:11 -0700 Subject: tools: migrate-lp-user-to-github removes repo_dir if created (#35) To run: ./tools/migrate-lp-user-to-github LAUCHPAD_USERNAME GITHUB_USERNAME --- tools/migrate-lp-user-to-github | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) (limited to 'tools') diff --git a/tools/migrate-lp-user-to-github b/tools/migrate-lp-user-to-github index 6a498072..cbb34695 100755 --- a/tools/migrate-lp-user-to-github +++ b/tools/migrate-lp-user-to-github @@ -176,24 +176,34 @@ def main(): VERBOSITY = 1 if args.verbose else 0 repo_dir = args.repo_dir or PUBLISH_DIR if not os.path.exists(repo_dir): + cleanup_repo_dir = True subp(['git', 'clone', LP_UPSTREAM_PATH_TMPL.format(launchpad_user=args.launchpad_user), repo_dir]) + else: + cleanup_repo_dir = False + cwd = os.getcwd() os.chdir(repo_dir) log("Sycing master branch with upstream") subp(['git', 'checkout', 
'master']) subp(['git', 'pull']) - lp_remote_name, gh_remote_name = add_lp_and_github_remotes( - args.launchpad_user, args.github_user) - commit_msg = COMMIT_MSG_TMPL.format( - gh_username=args.github_user, lp_username=args.launchpad_user) - create_migration_branch( - MIGRATE_BRANCH_NAME, args.upstream, args.launchpad_user, - args.github_user, commit_msg) - - for push_remote in (lp_remote_name, gh_remote_name): - subp(['git', 'push', push_remote, MIGRATE_BRANCH_NAME, '--force']) - + try: + lp_remote_name, gh_remote_name = add_lp_and_github_remotes( + args.launchpad_user, args.github_user) + commit_msg = COMMIT_MSG_TMPL.format( + gh_username=args.github_user, lp_username=args.launchpad_user) + create_migration_branch( + MIGRATE_BRANCH_NAME, args.upstream, args.launchpad_user, + args.github_user, commit_msg) + + for push_remote in (lp_remote_name, gh_remote_name): + subp(['git', 'push', push_remote, MIGRATE_BRANCH_NAME, '--force']) + except Exception as e: + error('Failed setting up migration branches: {0}'.format(e)) + finally: + os.chdir(cwd) + if cleanup_repo_dir and os.path.exists(repo_dir): + util.del_dir(repo_dir) # Make merge request on LP log("[launchpad] Automatically creating merge proposal using launchpadlib") lp = Launchpad.login_with( @@ -222,6 +232,8 @@ def main(): " click 'Create pull request' at the following URL:\n" "{url}".format(url=GITHUB_PULL_URL.format( github_user=args.github_user, branch=MIGRATE_BRANCH_NAME))) + if os.path.exists(repo_dir): + util.del_dir(repo_dir) return 0 -- cgit v1.2.3 From b6055c40189afba323986059434b8d8adc85bba3 Mon Sep 17 00:00:00 2001 From: Igor Galić Date: Tue, 26 Nov 2019 17:44:21 +0100 Subject: set_passwords: support for FreeBSD (#46) Allow setting of user passwords on FreeBSD. The www/chpasswd utility, which we depended on for FreeBSD installations, does *not* do the same thing as the equally named Linux utility. For FreeBSD, we now use the pw(8) utility (which can only process one user at a time). Additionally, we abstract password expiry into a function and override it in the FreeBSD distro class.
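For reference, pw(8) reads the new password from file descriptor 0 when invoked with '-h 0', and '-p 01-Jan-1970' sets an already-elapsed password change date to force expiry. A standalone sketch of the two calls the unit tests in this patch expect, using plain subprocess rather than cloud-init's util.subp (illustrative, not the patch code):

    # One pw(8) invocation per user: set the password, then expire it.
    import subprocess

    subprocess.run(['pw', 'usermod', 'ubuntu', '-h', '0'],
                   input=b'passw0rd', check=True)   # password read from stdin
    subprocess.run(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'],
                   check=True)                      # force password expiry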
Co-Authored-By: Chad Smith --- cloudinit/config/cc_set_passwords.py | 21 ++++++++++---- cloudinit/config/tests/test_set_passwords.py | 42 +++++++++++++++++++++++++++- cloudinit/distros/__init__.py | 7 +++++ cloudinit/distros/freebsd.py | 7 +++++ tests/unittests/test_distros/test_generic.py | 18 ++++++++++++ tools/build-on-freebsd | 1 - 6 files changed, 89 insertions(+), 7 deletions(-) (limited to 'tools') diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py index 1379428d..c3c5b0ff 100755 --- a/cloudinit/config/cc_set_passwords.py +++ b/cloudinit/config/cc_set_passwords.py @@ -179,20 +179,21 @@ def handle(_name, cfg, cloud, log, args): for line in plist: u, p = line.split(':', 1) if prog.match(p) is not None and ":" not in p: - hashed_plist_in.append("%s:%s" % (u, p)) + hashed_plist_in.append(line) hashed_users.append(u) else: + # in this else branch, we potentially change the password + # hence, a deviation from .append(line) if p == "R" or p == "RANDOM": p = rand_user_password() randlist.append("%s:%s" % (u, p)) plist_in.append("%s:%s" % (u, p)) users.append(u) - ch_in = '\n'.join(plist_in) + '\n' if users: try: log.debug("Changing password for %s:", users) - util.subp(['chpasswd'], ch_in) + chpasswd(cloud.distro, ch_in) except Exception as e: errors.append(e) util.logexc( @@ -202,7 +203,7 @@ def handle(_name, cfg, cloud, log, args): if hashed_users: try: log.debug("Setting hashed password for %s:", hashed_users) - util.subp(['chpasswd', '-e'], hashed_ch_in) + chpasswd(cloud.distro, hashed_ch_in, hashed=True) except Exception as e: errors.append(e) util.logexc( @@ -218,7 +219,7 @@ def handle(_name, cfg, cloud, log, args): expired_users = [] for u in users: try: - util.subp(['passwd', '--expire', u]) + cloud.distro.expire_passwd(u) expired_users.append(u) except Exception as e: errors.append(e) @@ -238,4 +239,14 @@ def handle(_name, cfg, cloud, log, args): def rand_user_password(pwlen=9): return util.rand_str(pwlen, select_from=PW_SET) + +def chpasswd(distro, plist_in, hashed=False): + if util.is_FreeBSD(): + for pentry in plist_in.splitlines(): + u, p = pentry.split(":") + distro.set_passwd(u, p, hashed=hashed) + else: + cmd = ['chpasswd'] + (['-e'] if hashed else []) + util.subp(cmd, plist_in) + # vi: ts=4 expandtab diff --git a/cloudinit/config/tests/test_set_passwords.py b/cloudinit/config/tests/test_set_passwords.py index a2ea5ec4..639fb9ea 100644 --- a/cloudinit/config/tests/test_set_passwords.py +++ b/cloudinit/config/tests/test_set_passwords.py @@ -74,7 +74,7 @@ class TestSetPasswordsHandle(CiTestCase): with_logs = True - def test_handle_on_empty_config(self): + def test_handle_on_empty_config(self, *args): """handle logs that no password has changed when config is empty.""" cloud = self.tmp_cloud(distro='ubuntu') setpass.handle( @@ -108,4 +108,44 @@ class TestSetPasswordsHandle(CiTestCase): '\n'.join(valid_hashed_pwds) + '\n')], m_subp.call_args_list) + @mock.patch(MODPATH + "util.is_FreeBSD") + @mock.patch(MODPATH + "util.subp") + def test_freebsd_calls_custom_pw_cmds_to_set_and_expire_passwords( + self, m_subp, m_is_freebsd): + """FreeBSD calls custom pw commands instead of chpasswd and passwd""" + m_is_freebsd.return_value = True + cloud = self.tmp_cloud(distro='freebsd') + valid_pwds = ['ubuntu:passw0rd'] + cfg = {'chpasswd': {'list': valid_pwds}} + setpass.handle( + 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[]) + self.assertEqual([ + mock.call(['pw', 'usermod', 'ubuntu', '-h', '0'], data='passw0rd', + logstring="chpasswd for 
ubuntu"), + mock.call(['pw', 'usermod', 'ubuntu', '-p', '01-Jan-1970'])], + m_subp.call_args_list) + + @mock.patch(MODPATH + "util.is_FreeBSD") + @mock.patch(MODPATH + "util.subp") + def test_handle_on_chpasswd_list_creates_random_passwords(self, m_subp, + m_is_freebsd): + """handle parses command set random passwords.""" + m_is_freebsd.return_value = False + cloud = self.tmp_cloud(distro='ubuntu') + valid_random_pwds = [ + 'root:R', + 'ubuntu:RANDOM'] + cfg = {'chpasswd': {'expire': 'false', 'list': valid_random_pwds}} + with mock.patch(MODPATH + 'util.subp') as m_subp: + setpass.handle( + 'IGNORED', cfg=cfg, cloud=cloud, log=self.logger, args=[]) + self.assertIn( + 'DEBUG: Handling input for chpasswd as list.', + self.logs.getvalue()) + self.assertNotEqual( + [mock.call(['chpasswd'], + '\n'.join(valid_random_pwds) + '\n')], + m_subp.call_args_list) + + # vi: ts=4 expandtab diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py index 00bdee3d..2ec79577 100644 --- a/cloudinit/distros/__init__.py +++ b/cloudinit/distros/__init__.py @@ -591,6 +591,13 @@ class Distro(object): util.logexc(LOG, 'Failed to disable password for user %s', name) raise e + def expire_passwd(self, user): + try: + util.subp(['passwd', '--expire', user]) + except Exception as e: + util.logexc(LOG, "Failed to set 'expire' for %s", user) + raise e + def set_passwd(self, user, passwd, hashed=False): pass_string = '%s:%s' % (user, passwd) cmd = ['chpasswd'] diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index c55f8990..8e5ae96c 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -234,6 +234,13 @@ class Distro(distros.Distro): if passwd_val is not None: self.set_passwd(name, passwd_val, hashed=True) + def expire_passwd(self, user): + try: + util.subp(['pw', 'usermod', user, '-p', '01-Jan-1970']) + except Exception as e: + util.logexc(LOG, "Failed to set pw expiration for %s", user) + raise e + def set_passwd(self, user, passwd, hashed=False): if hashed: hash_opt = "-H" diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py index 791fe612..7e0da4f2 100644 --- a/tests/unittests/test_distros/test_generic.py +++ b/tests/unittests/test_distros/test_generic.py @@ -244,5 +244,23 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase): with self.assertRaises(NotImplementedError): d.get_locale() + def test_expire_passwd_uses_chpasswd(self): + """Test ubuntu.expire_passwd uses the passwd command.""" + for d_name in ("ubuntu", "rhel"): + cls = distros.fetch(d_name) + d = cls(d_name, {}, None) + with mock.patch("cloudinit.util.subp") as m_subp: + d.expire_passwd("myuser") + m_subp.assert_called_once_with(["passwd", "--expire", "myuser"]) + + def test_expire_passwd_freebsd_uses_pw_command(self): + """Test FreeBSD.expire_passwd uses the pw command.""" + cls = distros.fetch("freebsd") + d = cls("freebsd", {}, None) + with mock.patch("cloudinit.util.subp") as m_subp: + d.expire_passwd("myuser") + m_subp.assert_called_once_with( + ["pw", "usermod", "myuser", "-p", "01-Jan-1970"]) + # vi: ts=4 expandtab diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd index 8ae64567..876368a9 100755 --- a/tools/build-on-freebsd +++ b/tools/build-on-freebsd @@ -18,7 +18,6 @@ py_prefix=$(${PYTHON} -c 'import sys; print("py%d%d" % (sys.version_info.major, depschecked=/tmp/c-i.dependencieschecked pkgs=" bash - chpasswd dmidecode e2fsprogs $py_prefix-Jinja2 -- cgit v1.2.3 From 4280c983f0c8c2af8da9775f7929725995e43a7c Mon 
From f1a73f54fab7a0aef8adc67d49a07c5a26f9f875 Mon Sep 17 00:00:00 2001
From: Eric Lafontaine
Date: Mon, 2 Dec 2019 15:45:55 -0500
Subject: lp-to-git-users: adding elafontaine (#74)

Mapped from eric-lafontaine1
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 557426be..f596fc7b 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -1,6 +1,7 @@
 {
     "chad.smith": "blackboxsw",
     "d-info-e": "do3meli",
+    "eric-lafontaine1": "elafontaine"
     "harald-jensas": "hjensas",
     "i.galic": "igalic",
     "larsks": "larsks",
-- 
cgit v1.2.3
From 0adcd589d8dddc1ec352f7810a36d62cb6d12133 Mon Sep 17 00:00:00 2001
From: Chad Smith
Date: Tue, 3 Dec 2019 18:00:34 -0700
Subject: fix invalid json in tools/.lp-to-git-user (#85)

---
 tools/.lp-to-git-user | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index f596fc7b..0fd950ff 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -1,7 +1,7 @@
 {
     "chad.smith": "blackboxsw",
     "d-info-e": "do3meli",
-    "eric-lafontaine1": "elafontaine"
+    "eric-lafontaine1": "elafontaine",
     "harald-jensas": "hjensas",
     "i.galic": "igalic",
     "larsks": "larsks",
@@ -12,4 +12,4 @@
     "tribaal": "chrisglass",
     "trstringer": "trstringer",
     "xiaofengw": "xiaofengw-vmware"
-}
\ No newline at end of file
+}
-- 
cgit v1.2.3
From 7ef655c183b8e1490722053eae9b0f216916c686 Mon Sep 17 00:00:00 2001
From: Frederick Lefebvre
Date: Wed, 4 Dec 2019 09:22:40 -0800
Subject: lp-to-git-users: adding fred-lefebvre

Mapped from fredlefebvre
---
 tools/.lp-to-git-user | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 0fd950ff..006e6251 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -2,6 +2,7 @@
     "chad.smith": "blackboxsw",
     "d-info-e": "do3meli",
     "eric-lafontaine1": "elafontaine",
+    "fredlefebvre": "fred-lefebvre",
     "harald-jensas": "hjensas",
     "i.galic": "igalic",
     "larsks": "larsks",
@@ -12,4 +13,4 @@
     "tribaal": "chrisglass",
     "trstringer": "trstringer",
     "xiaofengw": "xiaofengw-vmware"
-}
+}
\ No newline at end of file
-- 
cgit v1.2.3
From 5454fbb16d21116ffc479d357263aada1b47041a Mon Sep 17 00:00:00 2001
From: Ubuntu
Date: Wed, 4 Dec 2019 12:06:55 -0700
Subject: lp-to-git-users: adding AOhassan

Mapped from ahosmanmsft
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 006e6251..21b15b9c 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -1,4 +1,5 @@
 {
+    "ahosmanmsft": "AOhassan",
     "chad.smith": "blackboxsw",
     "d-info-e": "do3meli",
     "eric-lafontaine1": "elafontaine",
-- 
cgit v1.2.3
From ec6924ea1d321cc87e7414bee7734074590045b8 Mon Sep 17 00:00:00 2001
From: Conrad Hoffmann
Date: Wed, 4 Dec 2019 15:22:20 -0600
Subject: lp-to-git-users: adding bitfehler

Mapped from bitfehler
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 21b15b9c..3b4b873d 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -1,5 +1,6 @@
 {
     "ahosmanmsft": "AOhassan",
+    "bitfehler": "bitfehler",
     "chad.smith": "blackboxsw",
     "d-info-e": "do3meli",
     "eric-lafontaine1": "elafontaine",
-- 
cgit v1.2.3
From 11ef73e9500dcb325be85f8099a42d8d2e4caf95 Mon Sep 17 00:00:00 2001
From: Igor Galić
Date: Thu, 12 Dec 2019 01:32:14 +0100
Subject: ds_identify: if /sys is not available use dmidecode (#42)

On non-Linux systems, `/sys` won't be available. In these cases, we can
query dmidecode(8) directly.

This PR implements a dmi_decode function to query the same fields
ds-identify would otherwise read from /sys. This path is taken when /sys
isn't present.

In addition to adding dmidecode support, non-Linux systems also need a
mapping for virtualization detection, as systemd-detect-virt is not
present; on FreeBSD, use sysctl kern.vm_guest and provide a mapping[1]
between BSD values and those that match with systemd-detect-virt[2].

1. https://github.com/freebsd/freebsd/blob/master/sys/kern/subr_param.c#L149-L157
2. https://www.freedesktop.org/software/systemd/man/systemd-detect-virt.html

LP: #1852442
---
 tests/unittests/test_ds_identify.py | 24 +++++++++++++++++++++
 tools/ds-identify                   | 57 +++++++++++++++++++++++++++++++++++--
 2 files changed, 77 insertions(+), 4 deletions(-)

(limited to 'tools')

diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py
index 12c6ae36..36d7fbbf 100644
--- a/tests/unittests/test_ds_identify.py
+++ b/tests/unittests/test_ds_identify.py
@@ -140,7 +140,8 @@ class DsIdentifyBase(CiTestCase):
             {'name': 'blkid', 'out': BLKID_EFI_ROOT},
             {'name': 'ovf_vmware_transport_guestinfo',
              'out': 'No value found', 'ret': 1},
-
+            {'name': 'dmi_decode', 'ret': 1,
+             'err': 'No dmidecode program. ERROR.'},
         ]
 
         written = [d['name'] for d in mocks]
@@ -625,6 +626,21 @@ class TestDsIdentify(DsIdentifyBase):
         self._test_ds_not_found('Ec2-E24Cloud-negative')
 
 
+class TestBSDNoSys(DsIdentifyBase):
+    """Test *BSD code paths
+
+    FreeBSD doesn't have /sys, so we use dmidecode(8) here.
+    It also doesn't have systemd-detect-virt(8), so we use sysctl(8) to query
+    kern.vm_guest, and optionally map it."""
+
+    def test_dmi_decode(self):
+        """Test that dmidecode(8) works on systems which don't have /sys.
+
+        This will be used on *BSD systems.
+        """
+        self._test_ds_found('Hetzner-dmidecode')
+
+
 class TestIsIBMProvisioning(DsIdentifyBase):
     """Test the is_ibm_provisioning method in ds-identify."""
 
@@ -923,6 +939,12 @@ VALID_CFG = {
         'ds': 'Hetzner',
         'files': {P_SYS_VENDOR: 'Hetzner\n'},
     },
+    'Hetzner-dmidecode': {
+        'ds': 'Hetzner',
+        'mocks': [
+            {'name': 'dmi_decode', 'ret': 0, 'RET': 'Hetzner'}
+        ],
+    },
     'IBMCloud-metadata': {
         'ds': 'IBMCloud',
         'mocks': [
diff --git a/tools/ds-identify b/tools/ds-identify
index 20a99ee9..c93d4a77 100755
--- a/tools/ds-identify
+++ b/tools/ds-identify
@@ -179,13 +179,39 @@ debug() {
     echo "$@" 1>&3
 }
 
+dmi_decode() {
+    local sys_field="$1" dmi_field="" val=""
+    command -v dmidecode >/dev/null 2>&1 || {
+        warn "No dmidecode program. Cannot read $sys_field."
+        return 1
+    }
+    case "$1" in
+        sys_vendor) dmi_field="system-manufacturer";;
+        product_name) dmi_field="system-product-name";;
+        product_uuid) dmi_field="system-uuid";;
+        product_serial) dmi_field="system-serial-number";;
+        chassis_asset_tag) dmi_field="chassis-asset-tag";;
+        *) error "Unknown field $sys_field. Cannot call dmidecode."
+           return 1;;
+    esac
+    val=$(dmidecode --quiet "--string=$dmi_field" 2>/dev/null) || return 1
+    _RET="$val"
+}
+
 get_dmi_field() {
     local path="${PATH_SYS_CLASS_DMI_ID}/$1"
-    if [ ! -f "$path" ] || [ ! -r "$path" ]; then
-        _RET="$UNAVAILABLE"
+    _RET="$UNAVAILABLE"
+    if [ -d "${PATH_SYS_CLASS_DMI_ID}" ]; then
+        if [ -f "$path" ] && [ -r "$path" ]; then
+            read _RET < "${path}" || _RET="$ERROR"
+            return
+        fi
+        # if `/sys/class/dmi/id` exists, but not the object we're looking for,
+        # do *not* fallback to dmidecode!
         return
     fi
-    read _RET < "${path}" || _RET="$ERROR"
+    dmi_decode "$1" || _RET="$ERROR"
+    return
 }
 
 block_dev_with_label() {
@@ -267,6 +293,31 @@ detect_virt() {
         if [ $r -eq 0 ] || { [ $r -ne 0 ] && [ "$out" = "none" ]; }; then
             virt="$out"
         fi
+    elif [ "$DI_UNAME_KERNEL_NAME" = "FreeBSD" ]; then
+        # Map FreeBSD's kern.vm_guest names to systemd-detect-virt's
+        # vocabulary where they don't match up. See
+        # https://github.com/freebsd/freebsd/blob/master/sys/kern/subr_param.c#L144-L160
+        # https://www.freedesktop.org/software/systemd/man/systemd-detect-virt.html
+        #
+        #  systemd               | kern.vm_guest
+        # ---------------------+---------------
+        #  none                  | none
+        #  kvm                   | kvm
+        #  vmware                | vmware
+        #  microsoft             | hv
+        #  oracle                | vbox
+        #  xen                   | xen
+        #  parallels             | parallels
+        #  bhyve                 | bhyve
+        #  vm-other              | generic
+        out=$(sysctl -qn kern.vm_guest 2>/dev/null) && {
+            case "$out" in
+                hv) virt="microsoft" ;;
+                vbox) virt="oracle" ;;
+                generic) virt="vm-other";;
+                *) virt="$out"
+            esac
+        }
     fi
     _RET="$virt"
 }
-- 
cgit v1.2.3
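
ds-identify is shell, but the kern.vm_guest translation is easy to prototype in isolation. A rough Python equivalent of the mapping above (a sketch, assuming a FreeBSD host where sysctl(8) is available):

    import subprocess

    # FreeBSD kern.vm_guest value -> systemd-detect-virt(1) vocabulary.
    # Values not listed (none, kvm, vmware, xen, parallels, bhyve) already
    # use the same name on both sides and pass through unchanged.
    VM_GUEST_MAP = {"hv": "microsoft", "vbox": "oracle", "generic": "vm-other"}


    def detect_virt_freebsd():
        out = subprocess.check_output(
            ["sysctl", "-qn", "kern.vm_guest"], text=True).strip()
        return VM_GUEST_MAP.get(out, out)

On a Hyper-V guest, sysctl reports "hv" and this returns "microsoft", matching what systemd-detect-virt would have said on Linux.
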
From 2267cd31443d2c1032190102dd4b6011076ed182 Mon Sep 17 00:00:00 2001
From: Adam Dobrawy
Date: Tue, 17 Dec 2019 21:09:12 +0100
Subject: lp-to-git-users: adding ad-m

Mapped from adobrawy
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 3b4b873d..6fa75b26 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -1,4 +1,5 @@
 {
+    "adobrawy": "ad-m",
     "ahosmanmsft": "AOhassan",
     "bitfehler": "bitfehler",
     "chad.smith": "blackboxsw",
-- 
cgit v1.2.3
From 77c5a6522b288cea9e88991cc5b3bad6174fde6f Mon Sep 17 00:00:00 2001
From: Adam Dobrawy
Date: Tue, 17 Dec 2019 23:04:30 +0100
Subject: tools: Detect python to use via env in migrate-lp-user-to-github

Reduce inconsistency and allow using a virtualenv for launchpad
packages.
---
 tools/migrate-lp-user-to-github | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'tools')

diff --git a/tools/migrate-lp-user-to-github b/tools/migrate-lp-user-to-github
index cbb34695..1c058183 100755
--- a/tools/migrate-lp-user-to-github
+++ b/tools/migrate-lp-user-to-github
@@ -1,4 +1,4 @@
-#!/usr/bin/python3
+#!/usr/bin/env python3
 """Link your Launchpad user to github, proposing branches to LP and Github"""
 
 from argparse import ArgumentParser
-- 
cgit v1.2.3
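
Switching the shebang to /usr/bin/env python3 makes the script honor whatever python3 is first on $PATH, which is what lets an activated virtualenv supply the interpreter along with the launchpad packages. The effect is easy to observe (illustrative snippet only):

    import sys

    # Under an activated virtualenv this prints the venv's interpreter,
    # e.g. /home/user/venv/bin/python3; otherwise the system python3.
    print(sys.executable)
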
From f76c960aac760f4906c930e973369f6fff229487 Mon Sep 17 00:00:00 2001
From: Gonéri Le Bouder
Date: Thu, 19 Dec 2019 16:30:54 -0500
Subject: lp-to-git-users: adding goneri (#133)

Mapped from goneri
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 6fa75b26..11ce6e05 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -6,6 +6,7 @@
     "d-info-e": "do3meli",
     "eric-lafontaine1": "elafontaine",
     "fredlefebvre": "fred-lefebvre",
+    "goneri": "goneri",
     "harald-jensas": "hjensas",
     "i.galic": "igalic",
     "larsks": "larsks",
-- 
cgit v1.2.3
From 3bc8790711aaf689ae957748a131be782167db97 Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Fri, 20 Dec 2019 15:18:53 -0500
Subject: migrate-lp-user-to-github: ensure Launchpad repo exists (#136)

* migrate-lp-user-to-github: remove unused option
* migrate-lp-user-to-github: ensure Launchpad repo exists
* migrate-lp-user-to-github: typo fix
---
 tools/migrate-lp-user-to-github | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'tools')

diff --git a/tools/migrate-lp-user-to-github b/tools/migrate-lp-user-to-github
index 1c058183..f1247cb3 100755
--- a/tools/migrate-lp-user-to-github
+++ b/tools/migrate-lp-user-to-github
@@ -83,9 +83,6 @@ def get_parser():
     parser.add_argument(
         '-v', '--verbose', required=False, default=False, action='store_true',
         help=('Print all actions.'))
-    parser.add_argument(
-        '--push-remote', required=False, dest='pushremote',
-        help=('QA-only provide remote name into which you want to push'))
     return parser
 
 
@@ -122,7 +119,12 @@ def add_lp_and_github_remotes(lp_user, gh_user):
             " LP repo".format(lp_user))
         lp_remote_name = 'launchpad-{}'.format(lp_user)
         subp(['git', 'remote', 'add', lp_remote_name, lp_remote])
-    subp(['git', 'fetch', lp_remote_name])
+    try:
+        subp(['git', 'fetch', lp_remote_name])
+    except:
+        log("launchpad: Pushing to ensure LP repo exists")
+        subp(['git', 'push', lp_remote_name, 'master:master'])
+        subp(['git', 'fetch', lp_remote_name])
     if not gh_remote_name:
         log("github: Creating git remote github-{} to point at your"
             " GH repo".format(gh_user))
@@ -184,7 +186,7 @@ def main():
         cleanup_repo_dir = False
     cwd = os.getcwd()
     os.chdir(repo_dir)
-    log("Sycing master branch with upstream")
+    log("Syncing master branch with upstream")
     subp(['git', 'checkout', 'master'])
     subp(['git', 'pull'])
     try:
-- 
cgit v1.2.3
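
The fetch-then-push fallback above is a common bootstrap pattern: try to fetch, and if the remote repository does not exist yet, create it by pushing before retrying. Sketched in isolation (hypothetical helper name, using subprocess directly rather than the script's own subp wrapper):

    import subprocess


    def ensure_remote_repo(remote):
        """Fetch from `remote`, pushing master first if the repo is missing."""
        try:
            subprocess.run(["git", "fetch", remote], check=True)
        except subprocess.CalledProcessError:
            # The remote likely does not exist yet; pushing creates it on
            # hosts (such as Launchpad) that auto-create repos on first push.
            subprocess.run(["git", "push", remote, "master:master"], check=True)
            subprocess.run(["git", "fetch", remote], check=True)
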
From cf2f7a9c4a584bd3cfb265013fbed95836341919 Mon Sep 17 00:00:00 2001
From: Frederick Lefebvre
Date: Fri, 20 Dec 2019 15:15:16 -0800
Subject: Add support for the amazon variant in cloud.cfg.tmpl (#119)

---
 config/cloud.cfg.tmpl | 22 +++++++++++++++++++---
 tools/render-cloudcfg |  4 ++--
 2 files changed, 21 insertions(+), 5 deletions(-)

(limited to 'tools')

diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl
index 18ab0ac5..99f96ea1 100644
--- a/config/cloud.cfg.tmpl
+++ b/config/cloud.cfg.tmpl
@@ -21,8 +21,11 @@ disable_root: false
 disable_root: true
 {% endif %}
 
-{% if variant in ["centos", "fedora", "rhel"] %}
+{% if variant in ["amazon", "centos", "fedora", "rhel"] %}
 mount_default_fields: [~, ~, 'auto', 'defaults,nofail', '0', '2']
+{% if variant == "amazon" %}
+resize_rootfs: noblock
+{% endif %}
 resize_rootfs_tmp: /dev
 ssh_pwauth: 0
 
@@ -42,6 +45,13 @@ datasource_list: ['NoCloud', 'ConfigDrive', 'Azure', 'OpenStack', 'Ec2']
 #     timeout: 5 # (defaults to 50 seconds)
 #     max_wait: 10 # (defaults to 120 seconds)
 
+
+{% if variant == "amazon" %}
+# Amazon Linux relies on ec2-net-utils for network configuration
+network:
+  config: disabled
+{% endif %}
+
 # The modules that run in the 'init' stage
 cloud_init_modules:
  - migrator
@@ -133,7 +143,7 @@ cloud_final_modules:
 # (not accessible to handlers/transforms)
 system_info:
    # This will affect which distro class gets used
-{% if variant in ["arch", "centos", "debian", "fedora", "freebsd", "rhel", "suse", "ubuntu"] %}
+{% if variant in ["amazon", "arch", "centos", "debian", "fedora", "freebsd", "rhel", "suse", "ubuntu"] %}
    distro: {{ variant }}
 {% else %}
    # Unknown/fallback distro.
@@ -181,12 +191,18 @@ system_info:
          primary: http://ports.ubuntu.com/ubuntu-ports
          security: http://ports.ubuntu.com/ubuntu-ports
      ssh_svcname: ssh
-{% elif variant in ["arch", "centos", "fedora", "rhel", "suse"] %}
+{% elif variant in ["amazon", "arch", "centos", "fedora", "rhel", "suse"] %}
    # Default user name + that default users groups (if added/used)
    default_user:
+{% if variant == "amazon" %}
+     name: ec2-user
+     lock_passwd: True
+     gecos: EC2 Default User
+{% else %}
      name: {{ variant }}
      lock_passwd: True
      gecos: {{ variant }} Cloud User
+{% endif %}
 {% if variant == "suse" %}
      groups: [cdrom, users]
 {% elif variant == "arch" %}
diff --git a/tools/render-cloudcfg b/tools/render-cloudcfg
index a441f4ff..3d5fa725 100755
--- a/tools/render-cloudcfg
+++ b/tools/render-cloudcfg
@@ -4,8 +4,8 @@ import argparse
 import os
 import sys
 
-VARIANTS = ["arch", "centos", "debian", "fedora", "freebsd", "rhel", "suse",
-            "ubuntu", "unknown"]
+VARIANTS = ["amazon", "arch", "centos", "debian", "fedora", "freebsd", "rhel",
+            "suse", "ubuntu", "unknown"]
 
 if "avoid-pep8-E402-import-not-top-of-file":
     _tdir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
-- 
cgit v1.2.3
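
The template logic above is ordinary Jinja2: the variant value selects both the distro class and the default user. A minimal rendering sketch (standalone illustration using the jinja2 library directly, not cloud-init's own render-cloudcfg plumbing) shows the amazon branch taking effect:

    from jinja2 import Template

    TMPL = """\
    {% if variant == "amazon" %}
    default_user:
      name: ec2-user
      gecos: EC2 Default User
    {% else %}
    default_user:
      name: {{ variant }}
      gecos: {{ variant }} Cloud User
    {% endif %}
    """

    # trim_blocks drops the newlines left behind by {% ... %} tags.
    print(Template(TMPL, trim_blocks=True).render(variant="amazon"))
    # Renders the ec2-user stanza; any other variant gets the generic one.
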
From 777ea114c254e16119515a34698cf9a6a8e8c06d Mon Sep 17 00:00:00 2001
From: Andrew Poltavchenko
Date: Fri, 27 Dec 2019 18:05:18 +0200
Subject: lp-to-git-users: adding pa-yourserveradmin-com (#145)

Mapped from andreipoltavchenko
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 11ce6e05..c2e0c9aa 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -1,6 +1,7 @@
 {
     "adobrawy": "ad-m",
     "ahosmanmsft": "AOhassan",
+    "andreipoltavchenko": "pa-yourserveradmin-com",
     "bitfehler": "bitfehler",
     "chad.smith": "blackboxsw",
     "d-info-e": "do3meli",
-- 
cgit v1.2.3
From f8950d639e6d67d88197b152e354fc9eac1b934f Mon Sep 17 00:00:00 2001
From: Louis Bouchard
Date: Mon, 6 Jan 2020 17:11:47 +0100
Subject: lp-to-git-users: adding karibou (#140)

Mapped from louis
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index c2e0c9aa..08f7e5cd 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -12,6 +12,7 @@
     "i.galic": "igalic",
     "larsks": "larsks",
     "legovini": "paride",
+    "louis": "karibou",
     "pengpengs": "PengpengSun",
     "powersj": "powersj",
     "raharper": "raharper",
-- 
cgit v1.2.3
From 46887d4106bafdd5afd8a2bbf02825ca7b66095e Mon Sep 17 00:00:00 2001
From: Anton
Date: Tue, 7 Jan 2020 15:53:07 +0100
Subject: lp-to-git-users: adding ask0n (#150)

Mapped from askon
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 08f7e5cd..d450ce6e 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -2,6 +2,7 @@
     "adobrawy": "ad-m",
     "ahosmanmsft": "AOhassan",
     "andreipoltavchenko": "pa-yourserveradmin-com",
+    "askon": "ask0n",
     "bitfehler": "bitfehler",
     "chad.smith": "blackboxsw",
     "d-info-e": "do3meli",
-- 
cgit v1.2.3
From 098073de8865a479afa770833dd7a28d1233d294 Mon Sep 17 00:00:00 2001
From: Madhuri Kumari
Date: Tue, 7 Jan 2020 21:54:32 +0530
Subject: lp-to-git-users: adding madhuri-rai07 (#156)

Mapped from madhuri-rai07
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index d450ce6e..042d919e 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -14,6 +14,7 @@
     "larsks": "larsks",
     "legovini": "paride",
     "louis": "karibou",
+    "madhuri-rai07": "madhuri-rai07",
     "pengpengs": "PengpengSun",
     "powersj": "powersj",
     "raharper": "raharper",
-- 
cgit v1.2.3
From 7801bd092d38ab18c3bcd5e83714f64de524383c Mon Sep 17 00:00:00 2001
From: andreaf74 <53090017+andreaf74@users.noreply.github.com>
Date: Tue, 7 Jan 2020 18:52:22 +0100
Subject: lp-to-git-users: adding andreaf74 (#157)

Mapped from afranceschini
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 042d919e..79b43d0a 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -1,5 +1,6 @@
 {
     "adobrawy": "ad-m",
+    "afranceschini": "andreaf74",
     "ahosmanmsft": "AOhassan",
     "andreipoltavchenko": "pa-yourserveradmin-com",
     "askon": "ask0n",
-- 
cgit v1.2.3
From 3b27d5319d219a7f90aa86725407931dc077dec2 Mon Sep 17 00:00:00 2001
From: Robert Schweikert
Date: Tue, 7 Jan 2020 16:41:40 -0500
Subject: lp-to-git-users: adding rjschwei (#158)

Mapped from rjschwei
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 79b43d0a..4a687596 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -19,6 +19,7 @@
     "pengpengs": "PengpengSun",
     "powersj": "powersj",
     "raharper": "raharper",
+    "rjschwei": "rjschwei",
     "tribaal": "chrisglass",
     "trstringer": "trstringer",
     "xiaofengw": "xiaofengw-vmware"
-- 
cgit v1.2.3
From 6aa14766a7039333aac6f75dc442fa58dc4e692f Mon Sep 17 00:00:00 2001
From: Eduardo Otubo
Date: Wed, 8 Jan 2020 15:39:52 +0100
Subject: lp-to-git-users: adding otubo (#135)

Mapped from otubo
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 4a687596..304fbbae 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -16,6 +16,7 @@
     "legovini": "paride",
     "louis": "karibou",
     "madhuri-rai07": "madhuri-rai07",
+    "otubo": "otubo",
     "pengpengs": "PengpengSun",
     "powersj": "powersj",
     "raharper": "raharper",
-- 
cgit v1.2.3
From 3aaa0f2760527f34ffe0e4c67fc4bb6078ce2b4d Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Wed, 8 Jan 2020 13:09:38 -0500
Subject: lp-to-git-users: adding OddBloke (#159)

Mapped from daniel-thewatkins
---
 tools/.lp-to-git-user | 1 +
 1 file changed, 1 insertion(+)

(limited to 'tools')

diff --git a/tools/.lp-to-git-user b/tools/.lp-to-git-user
index 304fbbae..6b20d360 100644
--- a/tools/.lp-to-git-user
+++ b/tools/.lp-to-git-user
@@ -7,6 +7,7 @@
     "bitfehler": "bitfehler",
     "chad.smith": "blackboxsw",
     "d-info-e": "do3meli",
+    "daniel-thewatkins": "OddBloke",
     "eric-lafontaine1": "elafontaine",
     "fredlefebvre": "fred-lefebvre",
     "goneri": "goneri",
-- 
cgit v1.2.3
From fa3bdc9ecf9421ac3bad1bf6d01a82c70714af29 Mon Sep 17 00:00:00 2001
From: Paride Legovini
Date: Tue, 28 Jan 2020 16:30:21 +0100
Subject: tools/run-container: drop support for python2 (#192)

Drop support for specifying a Python interpreter different from python3
in tools/run-container.
---
 tools/run-container | 51 +++++++++++----------------------------------------
 1 file changed, 11 insertions(+), 40 deletions(-)

(limited to 'tools')

diff --git a/tools/run-container b/tools/run-container
index 1d24e15b..95b2982a 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -35,9 +35,6 @@ Usage: ${0##*/} [ options ] [images:]image-ref
                       tested. Inside container, changes are in
                       local-changes.diff.
    -k | --keep        keep container after tests
-       --pyexe V      python version to use. Default=auto.
-                      Should be name of an executable.
-                      ('python2' or 'python3')
    -p | --package     build a binary package (.deb or .rpm)
    -s | --source-package  build source package (debuild -S or srpm)
    -u | --unittest    run unit tests
@@ -262,32 +259,23 @@ prep() {
     # we need some very basic things not present in the container.
     #  - git
     #  - tar (CentOS 6 lxc container does not have it)
-    #  - python-argparse (or python3)
+    #  - python3
     local needed="" pair="" pkg="" cmd="" needed=""
     local pairs="tar:tar git:git"
-    local pyexe="$1"
     get_os_info
-    local py2pkg="python2" py3pkg="python3"
+    local py3pkg="python3"
     case "$OS_NAME" in
         opensuse)
-            py2pkg="python-base"
             py3pkg="python3-base";;
     esac
 
-    case "$pyexe" in
-        python2) pairs="$pairs python2:$py2pkg";;
-        python3) pairs="$pairs python3:$py3pkg";;
-    esac
+    pairs="$pairs python3:$py3pkg"
 
     for pair in $pairs; do
         pkg=${pair#*:}
         cmd=${pair%%:*}
         command -v "$cmd" >/dev/null 2>&1 || needed="${needed} $pkg"
     done
-    if [ "$OS_NAME" = "centos" -a "$pyexe" = "python2" ]; then
-        python -c "import argparse" >/dev/null 2>&1 ||
-            needed="${needed} python-argparse"
-    fi
     needed=${needed# }
     if [ -z "$needed" ]; then
         error "No prep packages needed"
@@ -300,15 +288,7 @@ prep() {
 }
 
 nose() {
-    local pyexe="$1" cmd=""
-    shift
-    get_os_info
-    if [ "$OS_NAME/$OS_VERSION" = "centos/6" ]; then
-        cmd="nosetests"
-    else
-        cmd="$pyexe -m nose"
-    fi
-    ${cmd} "$@"
+    python3 -m nose "$@"
 }
 
 is_done_cloudinit() {
@@ -411,7 +391,7 @@ run_self_inside_as_cd() {
 
 main() {
     local short_opts="a:hknpsuv"
-    local long_opts="artifacts:,dirty,help,keep,name:,pyexe:,package,source-package,unittest,verbose"
+    local long_opts="artifacts:,dirty,help,keep,name:,package,source-package,unittest,verbose"
     local getopt_out=""
     getopt_out=$(getopt --name "${0##*/}" \
         --options "${short_opts}" --long "${long_opts}" -- "$@") &&
@@ -420,7 +400,7 @@ main() {
 
     local cur="" next=""
     local package=false srcpackage=false unittest="" name=""
-    local dirty=false pyexe="auto" artifact_d="."
+    local dirty=false artifact_d="."
 
     while [ $# -ne 0 ]; do
        cur="${1:-}"; next="${2:-}";
@@ -430,7 +410,6 @@ main() {
             -h|--help) Usage ; exit 0;;
             -k|--keep) KEEP=true;;
             -n|--name) name="$next"; shift;;
-            --pyexe) pyexe=$next; shift;;
             -p|--package) package=true;;
             -s|--source-package) srcpackage=true;;
             -u|--unittest) unittest=1;;
@@ -470,16 +449,8 @@ main() {
     get_os_info_in "$name" ||
         { errorrc "failed to get os_info in $name"; return; }
 
-    if [ "$pyexe" = "auto" ]; then
-        case "$OS_NAME/$OS_VERSION" in
-            centos/*|opensuse/*) pyexe=python2;;
-            *) pyexe=python3;;
-        esac
-        debug 1 "set pyexe=$pyexe for $OS_NAME/$OS_VERSION"
-    fi
-
     # prep the container (install very basic dependencies)
-    run_self_inside "$name" prep "$pyexe" ||
+    run_self_inside "$name" prep ||
         { errorrc "Failed to prep container $name"; return; }
 
     # add the user
@@ -493,7 +464,7 @@ main() {
     }
 
     inside_as_cd "$name" root "$cdir" \
-        $pyexe ./tools/read-dependencies "--distro=${OS_NAME}" \
+        python3 ./tools/read-dependencies "--distro=${OS_NAME}" \
         --test-distro || {
         errorrc "FAIL: failed to install dependencies with read-dependencies"
         return
@@ -507,7 +478,7 @@ main() {
 
     if [ -n "$unittest" ]; then
         debug 1 "running unit tests."
-        run_self_inside_as_cd "$name" "$user" "$cdir" nose "$pyexe" \
+        run_self_inside_as_cd "$name" "$user" "$cdir" nose \
             tests/unittests cloudinit/ || {
             errorrc "nosetests failed.";
             errors[${#errors[@]}]="nosetests"
@@ -537,7 +508,7 @@ main() {
     }
     debug 1 "building source package with $build_srcpkg."
     # shellcheck disable=SC2086
-    inside_as_cd "$name" "$user" "$cdir" $pyexe $build_srcpkg || {
+    inside_as_cd "$name" "$user" "$cdir" python3 $build_srcpkg || {
         errorrc "failed: $build_srcpkg";
         errors[${#errors[@]}]="source package"
    }
@@ -550,7 +521,7 @@ main() {
     }
     debug 1 "building binary package with $build_pkg."
     # shellcheck disable=SC2086
-    inside_as_cd "$name" "$user" "$cdir" $pyexe $build_pkg || {
+    inside_as_cd "$name" "$user" "$cdir" python3 $build_pkg || {
         errorrc "failed: $build_pkg";
         errors[${#errors[@]}]="binary package"
     }
-- 
cgit v1.2.3
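
prep() checks each command:package pair and queues the package only when the command is missing, which is why the patch can simply append python3:$py3pkg to the list. A Python rendition of the post-patch behavior (illustrative only; the real script stays in shell):

    from shutil import which

    # "command: package" pairs as in prep(); python3-base would be the
    # openSUSE package name, python3 everywhere else.
    PAIRS = {"tar": "tar", "git": "git", "python3": "python3"}


    def missing_packages(pairs=PAIRS):
        """Return packages whose corresponding command is not installed."""
        return [pkg for cmd, pkg in pairs.items() if which(cmd) is None]
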
From 0ecbd888c9491176ae41bbfa2b74a05234882000 Mon Sep 17 00:00:00 2001
From: Paride Legovini
Date: Thu, 30 Jan 2020 19:34:12 +0100
Subject: run-container: use 'test -n' instead of 'test ! -z' (#202)

Fixes shellcheck warning SC2236.
---
 tools/run-container | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'tools')

diff --git a/tools/run-container b/tools/run-container
index 95b2982a..23243474 100755
--- a/tools/run-container
+++ b/tools/run-container
@@ -347,7 +347,7 @@ wait_for_boot() {
     run_self_inside "$name" wait_inside "$name" "$wtime" "$VERBOSITY" ||
         { errorrc "wait inside $name failed."; return; }
 
-    if [ ! -z "${http_proxy-}" ]; then
+    if [ -n "${http_proxy-}" ]; then
         if [ "$OS_NAME" = "centos" ]; then
             debug 1 "configuring proxy ${http_proxy}"
             inside "$name" sh -c "echo proxy=$http_proxy >> /etc/yum.conf"
-- 
cgit v1.2.3
From 4ad5c5d058cc9501ef2f294cb6430d9ffe372e7d Mon Sep 17 00:00:00 2001
From: Daniel Watkins
Date: Thu, 20 Feb 2020 12:11:04 -0500
Subject: Update tooling for GitHub-based new releases (#223)

* tools/read-version: don't enforce version parity in release branch CI

  We have a bootstrapping problem with new releases, currently. To take
  the example of 20.1: the branch that bumps the version fails CI
  because there is no 20.1 tag for it to use in read-version.

  Previously, this was solved by creating a tag and pushing it to the
  cloud-init repo before the commit landed. However, we have GitHub
  branch protection enabled, so the commit that needs to be tagged is
  not created until the pull request lands in master.

  This works around this problem by introducing a very specific check:
  if we are performing CI for an upstream release branch, we skip the
  read-version checking that we know will fail.

* tools/make-tarball: add --version parameter

  When using make-tarball as part of a CI build of a new upstream
  release, the version it determines is inconsistent with the version
  that other tools determine. Instead of encoding the logic here (as
  well as in Python elsewhere), we add a parameter to allow us to set
  it from outside the script.

* packages/bddeb: handle missing version_long in new version CI

  If we're running in CI for a new upstream release, we have to use
  `version` instead of `version_long` (because we don't yet have the
  tag required to generate `version_long`).
---
 packages/bddeb     |  9 ++++++++-
 tools/make-tarball | 12 ++++++++++--
 tools/read-version |  9 ++++++++-
 3 files changed, 26 insertions(+), 4 deletions(-)

(limited to 'tools')

diff --git a/packages/bddeb b/packages/bddeb
index 95602a02..209765a5 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -177,6 +177,11 @@ def main():
 
     # output like 0.7.6-1022-g36e92d3
     ver_data = read_version()
+    if ver_data['is_release_branch_ci']:
+        # If we're performing CI for a new release branch, we don't yet
+        # have the tag required to generate version_long; use version
+        # instead.
+        ver_data['version_long'] = ver_data['version']
 
     # This is really only a temporary archive
     # since we will extract it then add in the debian
@@ -192,7 +197,9 @@ def main():
                 break
     if path is None:
         print("Creating a temp tarball using the 'make-tarball' helper")
-        run_helper('make-tarball', ['--long', '--output=' + tarball_fp])
+        run_helper('make-tarball',
+                   ['--version', ver_data['version_long'],
+                    '--output=' + tarball_fp])
 
     print("Extracting temporary tarball %r" % (tarball))
     cmd = ['tar', '-xvzf', tarball_fp, '-C', tdir]
diff --git a/tools/make-tarball b/tools/make-tarball
index 8d540139..462e7d04 100755
--- a/tools/make-tarball
+++ b/tools/make-tarball
@@ -15,24 +15,27 @@ Usage: ${0##*/} [revision]
    options:
      -h | --help             print usage
      -o | --output FILE      write to file
+          --version VERSION  Set the version used in the tarball.
+                             Default value is determined with 'git describe'.
          --orig-tarball      Write file cloud-init_<version>.orig.tar.gz
          --long              Use git describe --long for versioning
 EOF
 }
 
 short_opts="ho:v"
-long_opts="help,output:,orig-tarball,long"
+long_opts="help,output:,version:,orig-tarball,long"
 getopt_out=$(getopt --name "${0##*/}" \
     --options "${short_opts}" --long "${long_opts}" -- "$@") &&
     eval set -- "${getopt_out}" || { Usage 1>&2; exit 1; }
 
 long_opt=""
 orig_opt=""
+version=""
 while [ $# -ne 0 ]; do
     cur=$1; next=$2
     case "$cur" in
         -h|--help) Usage; exit 0;;
         -o|--output) output=$next; shift;;
+        --version) version=$next; shift;;
         --long) long_opt="--long";;
         --orig-tarball) orig_opt=".orig";;
         --) shift; break;;
@@ -41,7 +44,12 @@ while [ $# -ne 0 ]; do
 done
 
 rev=${1:-HEAD}
-version=$(git describe --abbrev=8 "--match=[0-9]*" ${long_opt} $rev)
+if [ -z "$version" ]; then
+    version=$(git describe --abbrev=8 "--match=[0-9]*" ${long_opt} $rev)
+elif [ ! -z "$long_opt" ]; then
+    echo "WARNING: --long has no effect when --version is passed" >&2
+    exit 1
+fi
 
 archive_base="cloud-init-$version"
 if [ -z "$output" ]; then
-z "$long_opt" ]; then + echo "WARNING: --long has no effect when --version is passed" >&2 + exit 1 +fi archive_base="cloud-init-$version" if [ -z "$output" ]; then diff --git a/tools/read-version b/tools/read-version index 6dca659e..92e9fc96 100755 --- a/tools/read-version +++ b/tools/read-version @@ -65,7 +65,13 @@ output_json = '--json' in sys.argv src_version = ci_version.version_string() version_long = None -if is_gitdir(_tdir) and which("git"): +# If we're performing CI for a new release branch (which our tooling creates +# with an "upstream/" prefix), then we don't want to enforce strict version +# matching because we know it will fail. +is_release_branch_ci = ( + os.environ.get("TRAVIS_PULL_REQUEST_BRANCH", "").startswith("upstream/") +) +if is_gitdir(_tdir) and which("git") and not is_release_branch_ci: flags = [] if use_tags: flags = ['--tags'] @@ -113,6 +119,7 @@ data = { 'extra': extra, 'commit': commit, 'distance': distance, + 'is_release_branch_ci': is_release_branch_ci, } if output_json: -- cgit v1.2.3