From 4a60af54957634920e84a928aa22b4fc9a6dfd11 Mon Sep 17 00:00:00 2001 From: Junjie Wang Date: Fri, 21 Apr 2017 20:06:09 +0800 Subject: AliYun: Enable platform identification and enable by default. AliYun cloud platform is now identifying themselves by setting the dmi product id to the well known value "Alibaba Cloud ECS". The changes here identify that properly in tools/ds-identify and in the DataSourceAliYun. Since the 'get_data' for AliYun now identifies itself correctly, we can enable AliYun by default. LP: #1638931 --- cloudinit/sources/DataSourceAliYun.py | 14 +++++++++++++- cloudinit/sources/DataSourceEc2.py | 7 +++++++ 2 files changed, 20 insertions(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 9debe947..380e27cb 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -4,8 +4,10 @@ import os from cloudinit import sources from cloudinit.sources import DataSourceEc2 as EC2 +from cloudinit import util DEF_MD_VERSION = "2016-01-01" +ALIYUN_PRODUCT = "Alibaba Cloud ECS" class DataSourceAliYun(EC2.DataSourceEc2): @@ -24,7 +26,17 @@ class DataSourceAliYun(EC2.DataSourceEc2): @property def cloud_platform(self): - return EC2.Platforms.ALIYUN + if self._cloud_platform is None: + if _is_aliyun(): + self._cloud_platform = EC2.Platforms.ALIYUN + else: + self._cloud_platform = EC2.Platforms.NO_EC2_METADATA + + return self._cloud_platform + + +def _is_aliyun(): + return util.read_dmi_data('system-product-name') == ALIYUN_PRODUCT def parse_public_keys(public_keys): diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 2f9c7edf..9e2fdc0a 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -32,7 +32,12 @@ class Platforms(object): AWS = "AWS" BRIGHTBOX = "Brightbox" SEEDED = "Seeded" + # UNKNOWN indicates no positive id. 
If strict_id is 'warn' or 'false', + # then an attempt at the Ec2 Metadata service will be made. UNKNOWN = "Unknown" + # NO_EC2_METADATA indicates this platform does not have a Ec2 metadata + # service available. No attempt at the Ec2 Metadata service will be made. + NO_EC2_METADATA = "No-EC2-Metadata" class DataSourceEc2(sources.DataSource): @@ -65,6 +70,8 @@ class DataSourceEc2(sources.DataSource): strict_mode, self.cloud_platform) if strict_mode == "true" and self.cloud_platform == Platforms.UNKNOWN: return False + elif self.cloud_platform == Platforms.NO_EC2_METADATA: + return False try: if not self.wait_for_metadata_service(): -- cgit v1.2.3 From 5fb49bacf7441d8d20a7b4e0e7008ca586f5ebab Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 30 May 2017 10:28:05 -0600 Subject: azure: identify platform by well known value in chassis asset tag. Azure sets a known chassis asset tag to 7783-7084-3265-9085-8269-3286-77. We can inspect this in both ds-identify and DataSource.get_data to determine whether we are on Azure. Added unit tests to cover these changes and some minor tweaks to Exception error message content to give more context on malformed or missing ovf-env.xml files. 
LP: #1693939 --- cloudinit/sources/DataSourceAzure.py | 9 +++- tests/unittests/test_datasource/test_azure.py | 66 +++++++++++++++++++++++++-- tests/unittests/test_ds_identify.py | 39 ++++++++++++++++ tools/ds-identify | 35 +++++++++----- 4 files changed, 134 insertions(+), 15 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index b9458ffa..314848e4 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -36,6 +36,8 @@ RESOURCE_DISK_PATH = '/dev/disk/cloud/azure_resource' DEFAULT_PRIMARY_NIC = 'eth0' LEASE_FILE = '/var/lib/dhcp/dhclient.eth0.leases' DEFAULT_FS = 'ext4' +# DMI chassis-asset-tag is set static for all azure instances +AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): @@ -320,6 +322,11 @@ class DataSourceAzureNet(sources.DataSource): # azure removes/ejects the cdrom containing the ovf-env.xml # file on reboot. 
So, in order to successfully reboot we # need to look in the datadir and consider that valid + asset_tag = util.read_dmi_data('chassis-asset-tag') + if asset_tag != AZURE_CHASSIS_ASSET_TAG: + LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) + return False + asset_tag = util.read_dmi_data('chassis-asset-tag') ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] @@ -694,7 +701,7 @@ def read_azure_ovf(contents): try: dom = minidom.parseString(contents) except Exception as e: - raise BrokenAzureDataSource("invalid xml: %s" % e) + raise BrokenAzureDataSource("Invalid ovf-env.xml: %s" % e) results = find_child(dom.documentElement, lambda n: n.localName == "ProvisioningSection") diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 852ec703..42f49e06 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -76,7 +76,9 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None): return content -class TestAzureDataSource(TestCase): +class TestAzureDataSource(CiTestCase): + + with_logs = True def setUp(self): super(TestAzureDataSource, self).setUp() @@ -160,6 +162,12 @@ scbus-1 on xpt0 bus 0 self.instance_id = 'test-instance-id' + def _dmi_mocks(key): + if key == 'system-uuid': + return self.instance_id + elif key == 'chassis-asset-tag': + return '7783-7084-3265-9085-8269-3286-77' + self.apply_patches([ (dsaz, 'list_possible_azure_ds_devs', dsdevs), (dsaz, 'invoke_agent', _invoke_agent), @@ -170,7 +178,7 @@ scbus-1 on xpt0 bus 0 (dsaz, 'set_hostname', mock.MagicMock()), (dsaz, 'get_metadata_from_fabric', self.get_metadata_from_fabric), (dsaz.util, 'read_dmi_data', mock.MagicMock( - return_value=self.instance_id)), + side_effect=_dmi_mocks)), ]) dsrc = dsaz.DataSourceAzureNet( @@ -241,6 +249,23 @@ fdescfs /dev/fd fdescfs rw 0 0 res = get_path_dev_freebsd('/etc', mnt_list) self.assertIsNotNone(res) + 
@mock.patch('cloudinit.sources.DataSourceAzure.util.read_dmi_data') + def test_non_azure_dmi_chassis_asset_tag(self, m_read_dmi_data): + """Report non-azure when DMI's chassis asset tag doesn't match. + + Return False when the asset tag doesn't match Azure's static + AZURE_CHASSIS_ASSET_TAG. + """ + # Return a non-matching asset tag value + nonazure_tag = dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' + m_read_dmi_data.return_value = nonazure_tag + dsrc = dsaz.DataSourceAzureNet( + {}, distro=None, paths=self.paths) + self.assertFalse(dsrc.get_data()) + self.assertEqual( + "Non-Azure DMI asset tag '{0}' discovered.\n".format(nonazure_tag), + self.logs.getvalue()) + def test_basic_seed_dir(self): odata = {'HostName': "myhost", 'UserName': "myuser"} data = {'ovfcontent': construct_valid_ovf_env(data=odata), @@ -531,9 +556,17 @@ class TestAzureBounce(TestCase): self.patches.enter_context( mock.patch.object(dsaz, 'get_metadata_from_fabric', mock.MagicMock(return_value={}))) + + def _dmi_mocks(key): + if key == 'system-uuid': + return 'test-instance-id' + elif key == 'chassis-asset-tag': + return '7783-7084-3265-9085-8269-3286-77' + raise RuntimeError('should not get here') + self.patches.enter_context( mock.patch.object(dsaz.util, 'read_dmi_data', - mock.MagicMock(return_value='test-instance-id'))) + mock.MagicMock(side_effect=_dmi_mocks))) def setUp(self): super(TestAzureBounce, self).setUp() @@ -696,6 +729,33 @@ class TestAzureBounce(TestCase): self.assertEqual(0, self.set_hostname.call_count) +class TestLoadAzureDsDir(CiTestCase): + """Tests for load_azure_ds_dir.""" + + def setUp(self): + self.source_dir = self.tmp_dir() + super(TestLoadAzureDsDir, self).setUp() + + def test_missing_ovf_env_xml_raises_non_azure_datasource_error(self): + """load_azure_ds_dir raises an error When ovf-env.xml doesn't exit.""" + with self.assertRaises(dsaz.NonAzureDataSource) as context_manager: + dsaz.load_azure_ds_dir(self.source_dir) + self.assertEqual( + 'No ovf-env file found', + 
str(context_manager.exception)) + + def test_wb_invalid_ovf_env_xml_calls_read_azure_ovf(self): + """load_azure_ds_dir calls read_azure_ovf to parse the xml.""" + ovf_path = os.path.join(self.source_dir, 'ovf-env.xml') + with open(ovf_path, 'wb') as stream: + stream.write(b'invalid xml') + with self.assertRaises(dsaz.BrokenAzureDataSource) as context_manager: + dsaz.load_azure_ds_dir(self.source_dir) + self.assertEqual( + 'Invalid ovf-env.xml: syntax error: line 1, column 0', + str(context_manager.exception)) + + class TestReadAzureOvf(TestCase): def test_invalid_xml_raises_non_azure_ds(self): invalid_xml = "" + construct_valid_ovf_env(data={}) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 5c26e65f..8ccfe55c 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -39,9 +39,11 @@ RC_FOUND = 0 RC_NOT_FOUND = 1 DS_NONE = 'None' +P_CHASSIS_ASSET_TAG = "sys/class/dmi/id/chassis_asset_tag" P_PRODUCT_NAME = "sys/class/dmi/id/product_name" P_PRODUCT_SERIAL = "sys/class/dmi/id/product_serial" P_PRODUCT_UUID = "sys/class/dmi/id/product_uuid" +P_SEED_DIR = "var/lib/cloud/seed" P_DSID_CFG = "etc/cloud/ds-identify.cfg" MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} @@ -160,6 +162,30 @@ class TestDsIdentify(CiTestCase): _print_run_output(rc, out, err, cfg, files) return rc, out, err, cfg, files + def test_wb_print_variables(self): + """_print_info reports an array of discovered variables to stderr.""" + data = VALID_CFG['Azure-dmi-detection'] + _, _, err, _, _ = self._call_via_dict(data) + expected_vars = [ + 'DMI_PRODUCT_NAME', 'DMI_SYS_VENDOR', 'DMI_PRODUCT_SERIAL', + 'DMI_PRODUCT_UUID', 'PID_1_PRODUCT_NAME', 'DMI_CHASSIS_ASSET_TAG', + 'FS_LABELS', 'KERNEL_CMDLINE', 'VIRT', 'UNAME_KERNEL_NAME', + 'UNAME_KERNEL_RELEASE', 'UNAME_KERNEL_VERSION', 'UNAME_MACHINE', + 'UNAME_NODENAME', 'UNAME_OPERATING_SYSTEM', 'DSNAME', 'DSLIST', + 'MODE', 'ON_FOUND', 'ON_MAYBE', 'ON_NOTFOUND'] 
+ for var in expected_vars: + self.assertIn('{0}='.format(var), err) + + def test_azure_dmi_detection_from_chassis_asset_tag(self): + """Azure datasource is detected from DMI chassis-asset-tag""" + self._test_ds_found('Azure-dmi-detection') + + def test_azure_seed_file_detection(self): + """Azure datasource is detected due to presence of a seed file. + + The seed file tested is /var/lib/cloud/seed/azure/ovf-env.xml.""" + self._test_ds_found('Azure-seed-detection') + def test_aws_ec2_hvm(self): """EC2: hvm instances use dmi serial and uuid starting with 'ec2'.""" self._test_ds_found('Ec2-hvm') @@ -272,6 +298,19 @@ VALID_CFG = { 'ds': 'AliYun', 'files': {P_PRODUCT_NAME: 'Alibaba Cloud ECS\n'}, }, + 'Azure-dmi-detection': { + 'ds': 'Azure', + 'files': { + P_CHASSIS_ASSET_TAG: '7783-7084-3265-9085-8269-3286-77\n', + } + }, + 'Azure-seed-detection': { + 'ds': 'Azure', + 'files': { + P_CHASSIS_ASSET_TAG: 'No-match\n', + os.path.join(P_SEED_DIR, 'azure', 'ovf-env.xml'): 'present\n', + } + }, 'Ec2-hvm': { 'ds': 'Ec2', 'mocks': [{'name': 'detect_virt', 'RET': 'kvm', 'ret': 0}], diff --git a/tools/ds-identify b/tools/ds-identify index 5fc500b9..546e0f59 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -85,6 +85,7 @@ DI_MAIN=${DI_MAIN:-main} DI_DEFAULT_POLICY="search,found=all,maybe=all,notfound=${DI_DISABLED}" DI_DEFAULT_POLICY_NO_DMI="search,found=all,maybe=all,notfound=${DI_ENABLED}" +DI_DMI_CHASSIS_ASSET_TAG="" DI_DMI_PRODUCT_NAME="" DI_DMI_SYS_VENDOR="" DI_DMI_PRODUCT_SERIAL="" @@ -259,6 +260,12 @@ read_kernel_cmdline() { DI_KERNEL_CMDLINE="$cmdline" } +read_dmi_chassis_asset_tag() { + cached "${DI_DMI_CHASSIS_ASSET_TAG}" && return + get_dmi_field chassis_asset_tag + DI_DMI_CHASSIS_ASSET_TAG="$_RET" +} + read_dmi_sys_vendor() { cached "${DI_DMI_SYS_VENDOR}" && return get_dmi_field sys_vendor @@ -386,6 +393,14 @@ read_pid1_product_name() { DI_PID_1_PRODUCT_NAME="$product_name" } +dmi_chassis_asset_tag_matches() { + is_container && return 1 + case 
"${DI_DMI_CHASSIS_ASSET_TAG}" in + $1) return 0;; + esac + return 1 +} + dmi_product_name_matches() { is_container && return 1 case "${DI_DMI_PRODUCT_NAME}" in @@ -402,11 +417,6 @@ dmi_product_serial_matches() { return 1 } -dmi_product_name_is() { - is_container && return 1 - [ "${DI_DMI_PRODUCT_NAME}" = "$1" ] -} - dmi_sys_vendor_is() { is_container && return 1 [ "${DI_DMI_SYS_VENDOR}" = "$1" ] @@ -478,7 +488,7 @@ dscheck_CloudStack() { dscheck_CloudSigma() { # http://paste.ubuntu.com/23624795/ - dmi_product_name_is "CloudSigma" && return $DS_FOUND + dmi_product_name_matches "CloudSigma" && return $DS_FOUND return $DS_NOT_FOUND } @@ -654,6 +664,8 @@ dscheck_Azure() { # UUID="112D211272645f72" LABEL="rd_rdfe_stable.161212-1209" # TYPE="udf">/dev/sr0 # + local azure_chassis="7783-7084-3265-9085-8269-3286-77" + dmi_chassis_asset_tag_matches "${azure_chassis}" && return $DS_FOUND check_seed_dir azure ovf-env.xml && return ${DS_FOUND} [ "${DI_VIRT}" = "microsoft" ] || return ${DS_NOT_FOUND} @@ -786,7 +798,7 @@ dscheck_Ec2() { } dscheck_GCE() { - if dmi_product_name_is "Google Compute Engine"; then + if dmi_product_name_matches "Google Compute Engine"; then return ${DS_FOUND} fi # product name is not guaranteed (LP: #1674861) @@ -807,10 +819,10 @@ dscheck_OpenStack() { return ${DS_NOT_FOUND} fi local nova="OpenStack Nova" compute="OpenStack Compute" - if dmi_product_name_is "$nova"; then + if dmi_product_name_matches "$nova"; then return ${DS_FOUND} fi - if dmi_product_name_is "$compute"; then + if dmi_product_name_matches "$compute"; then # RDO installed nova (LP: #1675349). 
return ${DS_FOUND} fi @@ -823,7 +835,7 @@ dscheck_OpenStack() { dscheck_AliYun() { check_seed_dir "AliYun" meta-data user-data && return ${DS_FOUND} - if dmi_product_name_is "Alibaba Cloud ECS"; then + if dmi_product_name_matches "Alibaba Cloud ECS"; then return $DS_FOUND fi return $DS_NOT_FOUND @@ -889,6 +901,7 @@ collect_info() { read_config read_datasource_list read_dmi_sys_vendor + read_dmi_chassis_asset_tag read_dmi_product_name read_dmi_product_serial read_dmi_product_uuid @@ -903,7 +916,7 @@ print_info() { _print_info() { local n="" v="" vars="" vars="DMI_PRODUCT_NAME DMI_SYS_VENDOR DMI_PRODUCT_SERIAL" - vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME" + vars="$vars DMI_PRODUCT_UUID PID_1_PRODUCT_NAME DMI_CHASSIS_ASSET_TAG" vars="$vars FS_LABELS KERNEL_CMDLINE VIRT" vars="$vars UNAME_KERNEL_NAME UNAME_KERNEL_RELEASE UNAME_KERNEL_VERSION" vars="$vars UNAME_MACHINE UNAME_NODENAME UNAME_OPERATING_SYSTEM" -- cgit v1.2.3 From 1cd4323b940408aa34dcaa01bd8a7ed43d9a966a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 1 Jun 2017 12:40:12 -0400 Subject: azure: remove accidental duplicate line in merge. In previous commit I inadvertently left two calls to asset_tag = util.read_dmi_data('chassis-asset-tag') The second did not do anything useful. Thus, remove it. 
--- cloudinit/sources/DataSourceAzure.py | 1 - 1 file changed, 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 314848e4..a0b9eaef 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -326,7 +326,6 @@ class DataSourceAzureNet(sources.DataSource): if asset_tag != AZURE_CHASSIS_ASSET_TAG: LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) return False - asset_tag = util.read_dmi_data('chassis-asset-tag') ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] -- cgit v1.2.3 From 802e7cb2da8e2d0225525160e6edd6b58b275b8c Mon Sep 17 00:00:00 2001 From: Vladimir Pouzanov Date: Tue, 2 May 2017 16:08:34 +0100 Subject: NoCloud: support seed of nocloud from smbios information This allows the user to seed NoCloud in a trivial way from qemu/libvirt, by using a stock image and passing a single command line flag. No custom command line, no filesystem modification, no bootstrap disk image. This is particularly handy now that Ec2 backend is discouraged from use under bug 1660385. LP: #1691772 --- cloudinit/sources/DataSourceNoCloud.py | 12 ++++++++++++ doc/rtd/topics/datasources/nocloud.rst | 22 ++++++++++++++++++++++ tools/ds-identify | 3 +++ 3 files changed, 37 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index c68f6b8c..e641244d 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -42,6 +42,18 @@ class DataSourceNoCloud(sources.DataSource): mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "", 'network-config': None} + try: + # Parse the system serial label from dmi. 
If not empty, try parsing + # like the commandline + md = {} + serial = util.read_dmi_data('system-serial-number') + if serial and load_cmdline_data(md, serial): + found.append("dmi") + mydata = _merge_new_seed(mydata, {'meta-data': md}) + except Exception: + util.logexc(LOG, "Unable to parse dmi data") + return False + try: # Parse the kernel command line, getting data passed in md = {} diff --git a/doc/rtd/topics/datasources/nocloud.rst b/doc/rtd/topics/datasources/nocloud.rst index 0159e853..665057f3 100644 --- a/doc/rtd/topics/datasources/nocloud.rst +++ b/doc/rtd/topics/datasources/nocloud.rst @@ -11,6 +11,28 @@ You can provide meta-data and user-data to a local vm boot via files on a `vfat`_ or `iso9660`_ filesystem. The filesystem volume label must be ``cidata``. +Alternatively, you can provide meta-data via kernel command line or SMBIOS +"serial number" option. The data must be passed in the form of a string: + +:: + + ds=nocloud[;key=val;key=val] + +or + +:: + + ds=nocloud-net[;key=val;key=val] + +e.g. you can pass this option to QEMU: + +:: + + -smbios type=1,serial=ds=nocloud-net;s=http://10.10.0.1:8000/ + +to cause NoCloud to fetch the full meta-data from http://10.10.0.1:8000/meta-data +after the network initialization is complete. + These user-data and meta-data files are expected to be in the following format. 
:: diff --git a/tools/ds-identify b/tools/ds-identify index 546e0f59..7c8b144b 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -555,6 +555,9 @@ dscheck_NoCloud() { case " ${DI_KERNEL_CMDLINE} " in *\ ds=nocloud*) return ${DS_FOUND};; esac + case " ${DI_DMI_PRODUCT_SERIAL} " in + *\ ds=nocloud*) return ${DS_FOUND};; + esac for d in nocloud nocloud-net; do check_seed_dir "$d" meta-data user-data && return ${DS_FOUND} done -- cgit v1.2.3 From 8a06a1244c8ee20902db050e142c5a0b2fd777a9 Mon Sep 17 00:00:00 2001 From: Hongjiang Zhang Date: Wed, 7 Jun 2017 13:58:51 +0800 Subject: FreeBSD: fix cdrom mounting failure if /mnt/cdrom/secure did not exist. The current method is to attempt to mount the cdrom (/dev/cd0), if it is successful, /dev/cd0 is configured, otherwise, it is not configured. The problem is it forgets to check whether the mounting destination folder is created or not. As a result, mounting attempt failed even if cdrom is ready. LP: #1696295 --- cloudinit/sources/DataSourceAzure.py | 15 +++++++-------- tests/unittests/test_datasource/test_azure.py | 12 +++++++++++- 2 files changed, 18 insertions(+), 9 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index a0b9eaef..a8bad90b 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -799,18 +799,17 @@ def encrypt_pass(password, salt_id="$6$"): def list_possible_azure_ds_devs(): - # return a sorted list of devices that might have a azure datasource devlist = [] if util.is_FreeBSD(): + # add '/dev/cd0' to devlist if it is configured + # here wants to test whether '/dev/cd0' is available cdrom_dev = "/dev/cd0" try: - util.subp(["mount", "-o", "ro", "-t", "udf", cdrom_dev, - "/mnt/cdrom/secure"]) - except util.ProcessExecutionError: - LOG.debug("Fail to mount cd") - return devlist - util.subp(["umount", "/mnt/cdrom/secure"]) - devlist.append(cdrom_dev) + with open(cdrom_dev) as fp: + 
fp.read(1024) + devlist.append(cdrom_dev) + except IOError: + LOG.debug("cdrom (%s) is not configured", cdrom_dev) else: for fstype in ("iso9660", "udf"): devlist.extend(util.find_devs_with("TYPE=%s" % fstype)) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index b17f389c..114b1a5d 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -7,7 +7,8 @@ from cloudinit.util import find_freebsd_part from cloudinit.util import get_path_dev_freebsd from ..helpers import (CiTestCase, TestCase, populate_dir, mock, - ExitStack, PY26, SkipTest) + ExitStack, PY26, PY3, SkipTest) +from mock import patch, mock_open import crypt import os @@ -543,6 +544,15 @@ fdescfs /dev/fd fdescfs rw 0 0 ds.get_data() self.assertEqual(self.instance_id, ds.metadata['instance-id']) + def test_list_possible_azure_ds_devs(self): + devlist = [] + with patch('platform.platform', + mock.MagicMock(return_value="FreeBSD")): + name = 'builtins.open' if PY3 else '__builtin__.open' + with patch(name, mock_open(read_data="data")): + devlist.extend(dsaz.list_possible_azure_ds_devs()) + self.assertEqual(devlist, ['/dev/cd0']) + class TestAzureBounce(TestCase): -- cgit v1.2.3 From ea0a534d93544837e44d03e3394233d28c247f7d Mon Sep 17 00:00:00 2001 From: Hongjiang Zhang Date: Tue, 13 Jun 2017 16:21:02 +0800 Subject: FreeBSD: replace ifdown/ifup with "ifconfig down" and "ifconfig up". Fix the issue caused by different commands on Linux and FreeBSD. On Linux, we used ifdown and ifup to enable and disable a NIC, but on FreeBSD, the counterpart is "ifconfig down" and "ifconfig up". 
LP: #1697815 --- cloudinit/sources/DataSourceAzure.py | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index a8bad90b..ebb53d0a 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -177,6 +177,11 @@ if util.is_FreeBSD(): RESOURCE_DISK_PATH = "/dev/" + res_disk else: LOG.debug("resource disk is None") + BOUNCE_COMMAND = [ + 'sh', '-xc', + ("i=$interface; x=0; ifconfig down $i || x=$?; " + "ifconfig up $i || x=$?; exit $x") + ] BUILTIN_DS_CONFIG = { 'agent_command': AGENT_START_BUILTIN, -- cgit v1.2.3 From 9ccb8f5e2ab262ee04bb9c103c1302479f7c81d3 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 15 Jun 2017 16:39:50 -0400 Subject: FreeBSD: fix test failure The previous commit caused test failure. This separates out _check_freebsd_cdrom and mocks it in a test rather than patching open. --- cloudinit/sources/DataSourceAzure.py | 21 +++++++++++++-------- tests/unittests/test_datasource/test_azure.py | 21 +++++++++++---------- 2 files changed, 24 insertions(+), 18 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index ebb53d0a..71e7c55c 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -803,18 +803,23 @@ def encrypt_pass(password, salt_id="$6$"): return crypt.crypt(password, salt_id + util.rand_str(strlen=16)) +def _check_freebsd_cdrom(cdrom_dev): + """Return boolean indicating path to cdrom device has content.""" + try: + with open(cdrom_dev) as fp: + fp.read(1024) + return True + except IOError: + LOG.debug("cdrom (%s) is not configured", cdrom_dev) + return False + + def list_possible_azure_ds_devs(): devlist = [] if util.is_FreeBSD(): - # add '/dev/cd0' to devlist if it is configured - # here wants to test whether '/dev/cd0' is available cdrom_dev = "/dev/cd0" - try: - with 
open(cdrom_dev) as fp: - fp.read(1024) - devlist.append(cdrom_dev) - except IOError: - LOG.debug("cdrom (%s) is not configured", cdrom_dev) + if _check_freebsd_cdrom(cdrom_dev): + return [cdrom_dev] else: for fstype in ("iso9660", "udf"): devlist.extend(util.find_devs_with("TYPE=%s" % fstype)) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 114b1a5d..7d33daf7 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -7,8 +7,7 @@ from cloudinit.util import find_freebsd_part from cloudinit.util import get_path_dev_freebsd from ..helpers import (CiTestCase, TestCase, populate_dir, mock, - ExitStack, PY26, PY3, SkipTest) -from mock import patch, mock_open + ExitStack, PY26, SkipTest) import crypt import os @@ -544,14 +543,16 @@ fdescfs /dev/fd fdescfs rw 0 0 ds.get_data() self.assertEqual(self.instance_id, ds.metadata['instance-id']) - def test_list_possible_azure_ds_devs(self): - devlist = [] - with patch('platform.platform', - mock.MagicMock(return_value="FreeBSD")): - name = 'builtins.open' if PY3 else '__builtin__.open' - with patch(name, mock_open(read_data="data")): - devlist.extend(dsaz.list_possible_azure_ds_devs()) - self.assertEqual(devlist, ['/dev/cd0']) + @mock.patch("cloudinit.sources.DataSourceAzure.util.is_FreeBSD") + @mock.patch("cloudinit.sources.DataSourceAzure._check_freebsd_cdrom") + def test_list_possible_azure_ds_devs(self, m_check_fbsd_cdrom, + m_is_FreeBSD): + """On FreeBSD, possible devs should show /dev/cd0.""" + m_is_FreeBSD.return_value = True + m_check_fbsd_cdrom.return_value = True + self.assertEqual(dsaz.list_possible_azure_ds_devs(), ['/dev/cd0']) + self.assertEqual( + [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list) class TestAzureBounce(TestCase): -- cgit v1.2.3 From ecb408afa1104fe49ce6eb1dc5708be56abd5cb2 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 15 Jun 2017 10:03:45 -0400 Subject: FreeBSD: 
Make freebsd a variant, fix unittests and tools/build-on-freebsd. - Simplify the logic of 'variant' in util.system_info much of the data from https://github.com/hpcugent/easybuild/wiki/OS_flavor_name_version - fix get_resource_disk_on_freebsd when running on a system without an Azure resource disk. - fix tools/build-on-freebsd to replace oauth with oauthlib and add bash which is a dependency for tests. - update a few places that were checking for freebsd but not using the util.is_FreeBSD() --- cloudinit/config/cc_growpart.py | 2 +- cloudinit/config/cc_power_state_change.py | 2 +- cloudinit/sources/DataSourceAzure.py | 2 +- cloudinit/util.py | 46 ++++++++++-------------- config/cloud.cfg.tmpl | 20 +++++------ tests/unittests/test_handler/test_handler_ntp.py | 2 +- tests/unittests/test_util.py | 9 +++-- tools/build-on-freebsd | 6 ++-- 8 files changed, 40 insertions(+), 49 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index d2bc6e6c..bafca9d8 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -214,7 +214,7 @@ def device_part_info(devpath): # FreeBSD doesn't know of sysfs so just get everything we need from # the device, like /dev/vtbd0p2. 
- if util.system_info()["platform"].startswith('FreeBSD'): + if util.is_FreeBSD(): m = re.search('^(/dev/.+)p([0-9])$', devpath) return (m.group(1), m.group(2)) diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py index c1c6fe7e..eba58b02 100644 --- a/cloudinit/config/cc_power_state_change.py +++ b/cloudinit/config/cc_power_state_change.py @@ -71,7 +71,7 @@ def givecmdline(pid): # Example output from procstat -c 1 # PID COMM ARGS # 1 init /bin/init -- - if util.system_info()["platform"].startswith('FreeBSD'): + if util.is_FreeBSD(): (output, _err) = util.subp(['procstat', '-c', str(pid)]) line = output.splitlines()[1] m = re.search('\d+ (\w|\.|-)+\s+(/\w.+)', line) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 71e7c55c..4fe0d635 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -101,7 +101,7 @@ def get_dev_storvsc_sysctl(): sysctl_out, err = util.subp(['sysctl', 'dev.storvsc']) except util.ProcessExecutionError: LOG.debug("Fail to execute sysctl dev.storvsc") - return None + sysctl_out = "" return sysctl_out diff --git a/cloudinit/util.py b/cloudinit/util.py index ec68925e..c93b6d7e 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -573,7 +573,7 @@ def is_ipv4(instr): def is_FreeBSD(): - return system_info()['platform'].startswith('FreeBSD') + return system_info()['variant'] == "freebsd" def get_cfg_option_bool(yobj, key, default=False): @@ -598,37 +598,29 @@ def get_cfg_option_int(yobj, key, default=0): def system_info(): info = { 'platform': platform.platform(), + 'system': platform.system(), 'release': platform.release(), 'python': platform.python_version(), 'uname': platform.uname(), - 'dist': platform.linux_distribution(), # pylint: disable=W1505 + 'dist': platform.dist(), # pylint: disable=W1505 } - plat = info['platform'].lower() - # Try to get more info about what it actually is, in a format - # that we can 
easily use across linux and variants... - if plat.startswith('darwin'): - info['variant'] = 'darwin' - elif plat.endswith("bsd"): - info['variant'] = 'bsd' - elif plat.startswith('win'): - info['variant'] = 'windows' - elif 'linux' in plat: - # Try to get a single string out of these... - linux_dist, _version, _id = info['dist'] - linux_dist = linux_dist.lower() - if linux_dist in ('ubuntu', 'linuxmint', 'mint'): - info['variant'] = 'ubuntu' + system = info['system'].lower() + var = 'unknown' + if system == "linux": + linux_dist = info['dist'][0].lower() + if linux_dist in ('centos', 'fedora', 'debian'): + var = linux_dist + elif linux_dist in ('ubuntu', 'linuxmint', 'mint'): + var = 'ubuntu' + elif linux_dist == 'redhat': + var = 'rhel' else: - for prefix, variant in [('redhat', 'rhel'), - ('centos', 'centos'), - ('fedora', 'fedora'), - ('debian', 'debian')]: - if linux_dist.startswith(prefix): - info['variant'] = variant - if 'variant' not in info: - info['variant'] = 'linux' - if 'variant' not in info: - info['variant'] = 'unknown' + var = 'linux' + elif system in ('windows', 'darwin', "freebsd"): + var = system + + info['variant'] = var + return info diff --git a/config/cloud.cfg.tmpl b/config/cloud.cfg.tmpl index 5af2a88f..f4b9069b 100644 --- a/config/cloud.cfg.tmpl +++ b/config/cloud.cfg.tmpl @@ -2,7 +2,7 @@ # The top level settings are used as module # and system configuration. 
-{% if variant in ["bsd"] %} +{% if variant in ["freebsd"] %} syslog_fix_perms: root:wheel {% endif %} # A set of users which may be applied and/or used by various modules @@ -13,7 +13,7 @@ users: # If this is set, 'root' will not be able to ssh in and they # will get a message to login instead as the default $user -{% if variant in ["bsd"] %} +{% if variant in ["freebsd"] %} disable_root: false {% else %} disable_root: true @@ -30,7 +30,7 @@ ssh_pwauth: 0 # This will cause the set+update hostname module to not operate (if true) preserve_hostname: false -{% if variant in ["bsd"] %} +{% if variant in ["freebsd"] %} # This should not be required, but leave it in place until the real cause of # not beeing able to find -any- datasources is resolved. datasource_list: ['ConfigDrive', 'Azure', 'OpenStack', 'Ec2'] @@ -53,13 +53,13 @@ cloud_init_modules: - write-files - growpart - resizefs -{% if variant not in ["bsd"] %} +{% if variant not in ["freebsd"] %} - disk_setup - mounts {% endif %} - set_hostname - update_hostname -{% if variant not in ["bsd"] %} +{% if variant not in ["freebsd"] %} - update_etc_hosts - ca-certs - rsyslog @@ -87,7 +87,7 @@ cloud_config_modules: - apt-pipelining - apt-configure {% endif %} -{% if variant not in ["bsd"] %} +{% if variant not in ["freebsd"] %} - ntp {% endif %} - timezone @@ -108,7 +108,7 @@ cloud_final_modules: - landscape - lxd {% endif %} -{% if variant not in ["bsd"] %} +{% if variant not in ["freebsd"] %} - puppet - chef - salt-minion @@ -130,10 +130,8 @@ cloud_final_modules: # (not accessible to handlers/transforms) system_info: # This will affect which distro class gets used -{% if variant in ["centos", "debian", "fedora", "rhel", "ubuntu"] %} +{% if variant in ["centos", "debian", "fedora", "rhel", "ubuntu", "freebsd"] %} distro: {{ variant }} -{% elif variant in ["bsd"] %} - distro: freebsd {% else %} # Unknown/fallback distro. 
distro: ubuntu @@ -182,7 +180,7 @@ system_info: cloud_dir: /var/lib/cloud/ templates_dir: /etc/cloud/templates/ ssh_svcname: sshd -{% elif variant in ["bsd"] %} +{% elif variant in ["freebsd"] %} # Default user name + that default users groups (if added/used) default_user: name: freebsd diff --git a/tests/unittests/test_handler/test_handler_ntp.py b/tests/unittests/test_handler/test_handler_ntp.py index c4299d94..7f278646 100644 --- a/tests/unittests/test_handler/test_handler_ntp.py +++ b/tests/unittests/test_handler/test_handler_ntp.py @@ -62,7 +62,7 @@ class TestNtp(FilesystemMockingTestCase): def test_ntp_rename_ntp_conf(self): """When NTP_CONF exists, rename_ntp moves it.""" ntpconf = self.tmp_path("ntp.conf", self.new_root) - os.mknod(ntpconf) + util.write_file(ntpconf, "") with mock.patch("cloudinit.config.cc_ntp.NTP_CONF", ntpconf): cc_ntp.rename_ntp_conf() self.assertFalse(os.path.exists(ntpconf)) diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py index 014aa6a3..a73fd26a 100644 --- a/tests/unittests/test_util.py +++ b/tests/unittests/test_util.py @@ -20,6 +20,9 @@ except ImportError: import mock +BASH = util.which('bash') + + class FakeSelinux(object): def __init__(self, match_what): @@ -544,17 +547,17 @@ class TestReadSeeded(helpers.TestCase): class TestSubp(helpers.TestCase): - stdin2err = ['bash', '-c', 'cat >&2'] + stdin2err = [BASH, '-c', 'cat >&2'] stdin2out = ['cat'] utf8_invalid = b'ab\xaadef' utf8_valid = b'start \xc3\xa9 end' utf8_valid_2 = b'd\xc3\xa9j\xc8\xa7' - printenv = ['bash', '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] + printenv = [BASH, '-c', 'for n in "$@"; do echo "$n=${!n}"; done', '--'] def printf_cmd(self, *args): # bash's printf supports \xaa. So does /usr/bin/printf # but by using bash, we remove dependency on another program. 
- return(['bash', '-c', 'printf "$@"', 'printf'] + list(args)) + return([BASH, '-c', 'printf "$@"', 'printf'] + list(args)) def test_subp_handles_utf8(self): # The given bytes contain utf-8 accented characters as seen in e.g. diff --git a/tools/build-on-freebsd b/tools/build-on-freebsd index ccc10b40..ff9153ad 100755 --- a/tools/build-on-freebsd +++ b/tools/build-on-freebsd @@ -8,6 +8,7 @@ fail() { echo "FAILED:" "$@" 1>&2; exit 1; } # Check dependencies: depschecked=/tmp/c-i.dependencieschecked pkgs=" + bash dmidecode e2fsprogs py27-Jinja2 @@ -16,7 +17,7 @@ pkgs=" py27-configobj py27-jsonpatch py27-jsonpointer - py27-oauth + py27-oauthlib py27-prettytable py27-requests py27-serial @@ -35,9 +36,6 @@ touch $depschecked python setup.py build python setup.py install -O1 --skip-build --prefix /usr/local/ --init-system sysvinit_freebsd -# Install the correct config file: -cp config/cloud.cfg-freebsd /etc/cloud/cloud.cfg - # Enable cloud-init in /etc/rc.conf: sed -i.bak -e "/cloudinit_enable=.*/d" /etc/rc.conf echo 'cloudinit_enable="YES"' >> /etc/rc.conf -- cgit v1.2.3 From ebc9ecbc8a76bdf511a456fb72339a7eb4c20568 Mon Sep 17 00:00:00 2001 From: Ryan Harper Date: Tue, 20 Jun 2017 17:06:43 -0500 Subject: Azure: Add network-config, Refactor net layer to handle duplicate macs. On systems with network devices with duplicate mac addresses, cloud-init will fail to rename the devices according to the specified network configuration. Refactor net layer to search by device driver and device id if available. Azure systems may have duplicate mac addresses by design. Update Azure datasource to run at init-local time and let Azure datasource generate a fallback networking config to handle advanced networking configurations. Lastly, add a 'setup' method to the datasources that is called before userdata/vendordata is processed but after networking is up. That is used here on Azure to interact with the 'fabric'. 
--- cloudinit/cmd/main.py | 3 + cloudinit/net/__init__.py | 181 ++++++++-- cloudinit/net/eni.py | 2 + cloudinit/net/renderer.py | 4 +- cloudinit/net/udev.py | 7 +- cloudinit/sources/DataSourceAzure.py | 114 +++++- cloudinit/sources/__init__.py | 15 +- cloudinit/stages.py | 5 + tests/unittests/test_datasource/test_azure.py | 174 +++++++-- tests/unittests/test_datasource/test_common.py | 2 +- tests/unittests/test_net.py | 478 ++++++++++++++++++++++++- 11 files changed, 887 insertions(+), 98 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index ce3c10dd..139e03b3 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -372,6 +372,9 @@ def main_init(name, args): LOG.debug("[%s] %s is in local mode, will apply init modules now.", mode, init.datasource) + # Give the datasource a chance to use network resources. + # This is used on Azure to communicate with the fabric over network. + init.setup_datasource() # update fully realizes user-data (pulling in #include if necessary) init.update() # Stage 7 diff --git a/cloudinit/net/__init__.py b/cloudinit/net/__init__.py index 65accbb0..cba991a5 100644 --- a/cloudinit/net/__init__.py +++ b/cloudinit/net/__init__.py @@ -97,6 +97,10 @@ def is_bridge(devname): return os.path.exists(sys_dev_path(devname, "bridge")) +def is_bond(devname): + return os.path.exists(sys_dev_path(devname, "bonding")) + + def is_vlan(devname): uevent = str(read_sys_net_safe(devname, "uevent")) return 'DEVTYPE=vlan' in uevent.splitlines() @@ -124,6 +128,26 @@ def is_present(devname): return os.path.exists(sys_dev_path(devname)) +def device_driver(devname): + """Return the device driver for net device named 'devname'.""" + driver = None + driver_path = sys_dev_path(devname, "device/driver") + # driver is a symlink to the driver *dir* + if os.path.islink(driver_path): + driver = os.path.basename(os.readlink(driver_path)) + + return driver + + +def device_devid(devname): + """Return the device 
id string for net device named 'devname'.""" + dev_id = read_sys_net_safe(devname, "device/device") + if dev_id is False: + return None + + return dev_id + + def get_devicelist(): return os.listdir(SYS_CLASS_NET) @@ -138,12 +162,21 @@ def is_disabled_cfg(cfg): return cfg.get('config') == "disabled" -def generate_fallback_config(): +def generate_fallback_config(blacklist_drivers=None, config_driver=None): """Determine which attached net dev is most likely to have a connection and generate network state to run dhcp on that interface""" + + if not config_driver: + config_driver = False + + if not blacklist_drivers: + blacklist_drivers = [] + # get list of interfaces that could have connections invalid_interfaces = set(['lo']) - potential_interfaces = set(get_devicelist()) + potential_interfaces = set([device for device in get_devicelist() + if device_driver(device) not in + blacklist_drivers]) potential_interfaces = potential_interfaces.difference(invalid_interfaces) # sort into interfaces with carrier, interfaces which could have carrier, # and ignore interfaces that are definitely disconnected @@ -155,6 +188,9 @@ def generate_fallback_config(): if is_bridge(interface): # skip any bridges continue + if is_bond(interface): + # skip any bonds + continue carrier = read_sys_net_int(interface, 'carrier') if carrier: connected.append(interface) @@ -194,9 +230,18 @@ def generate_fallback_config(): break if target_mac and target_name: nconf = {'config': [], 'version': 1} - nconf['config'].append( - {'type': 'physical', 'name': target_name, - 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]}) + cfg = {'type': 'physical', 'name': target_name, + 'mac_address': target_mac, 'subnets': [{'type': 'dhcp'}]} + # inject the device driver name, dev_id into config if enabled and + # device has a valid device driver value + if config_driver: + driver = device_driver(target_name) + if driver: + cfg['params'] = { + 'driver': driver, + 'device_id': device_devid(target_name), + } + 
nconf['config'].append(cfg) return nconf else: # can't read any interfaces addresses (or there are none); give up @@ -217,10 +262,16 @@ def apply_network_config_names(netcfg, strict_present=True, strict_busy=True): if ent.get('type') != 'physical': continue mac = ent.get('mac_address') - name = ent.get('name') if not mac: continue - renames.append([mac, name]) + name = ent.get('name') + driver = ent.get('params', {}).get('driver') + device_id = ent.get('params', {}).get('device_id') + if not driver: + driver = device_driver(name) + if not device_id: + device_id = device_devid(name) + renames.append([mac, name, driver, device_id]) return _rename_interfaces(renames) @@ -245,15 +296,27 @@ def _get_current_rename_info(check_downable=True): """Collect information necessary for rename_interfaces. returns a dictionary by mac address like: - {mac: - {'name': name - 'up': boolean: is_up(name), + {name: + { 'downable': None or boolean indicating that the - device has only automatically assigned ip addrs.}} + device has only automatically assigned ip addrs. 
+ 'device_id': Device id value (if it has one) + 'driver': Device driver (if it has one) + 'mac': mac address + 'name': name + 'up': boolean: is_up(name) + }} """ - bymac = {} - for mac, name in get_interfaces_by_mac().items(): - bymac[mac] = {'name': name, 'up': is_up(name), 'downable': None} + cur_info = {} + for (name, mac, driver, device_id) in get_interfaces(): + cur_info[name] = { + 'downable': None, + 'device_id': device_id, + 'driver': driver, + 'mac': mac, + 'name': name, + 'up': is_up(name), + } if check_downable: nmatch = re.compile(r"[0-9]+:\s+(\w+)[@:]") @@ -265,11 +328,11 @@ def _get_current_rename_info(check_downable=True): for bytes_out in (ipv6, ipv4): nics_with_addresses.update(nmatch.findall(bytes_out)) - for d in bymac.values(): + for d in cur_info.values(): d['downable'] = (d['up'] is False or d['name'] not in nics_with_addresses) - return bymac + return cur_info def _rename_interfaces(renames, strict_present=True, strict_busy=True, @@ -282,15 +345,15 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True, if current_info is None: current_info = _get_current_rename_info() - cur_bymac = {} - for mac, data in current_info.items(): + cur_info = {} + for name, data in current_info.items(): cur = data.copy() - cur['mac'] = mac - cur_bymac[mac] = cur + cur['name'] = name + cur_info[name] = cur def update_byname(bymac): return dict((data['name'], data) - for data in bymac.values()) + for data in cur_info.values()) def rename(cur, new): util.subp(["ip", "link", "set", cur, "name", new], capture=True) @@ -304,14 +367,48 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True, ops = [] errors = [] ups = [] - cur_byname = update_byname(cur_bymac) + cur_byname = update_byname(cur_info) tmpname_fmt = "cirename%d" tmpi = -1 - for mac, new_name in renames: - cur = cur_bymac.get(mac, {}) - cur_name = cur.get('name') + def entry_match(data, mac, driver, device_id): + """match if set and in data""" + if mac and driver and 
device_id: + return (data['mac'] == mac and + data['driver'] == driver and + data['device_id'] == device_id) + elif mac and driver: + return (data['mac'] == mac and + data['driver'] == driver) + elif mac: + return (data['mac'] == mac) + + return False + + def find_entry(mac, driver, device_id): + match = [data for data in cur_info.values() + if entry_match(data, mac, driver, device_id)] + if len(match): + if len(match) > 1: + msg = ('Failed to match a single device. Matched devices "%s"' + ' with search values "(mac:%s driver:%s device_id:%s)"' + % (match, mac, driver, device_id)) + raise ValueError(msg) + return match[0] + + return None + + for mac, new_name, driver, device_id in renames: cur_ops = [] + cur = find_entry(mac, driver, device_id) + if not cur: + if strict_present: + errors.append( + "[nic not present] Cannot rename mac=%s to %s" + ", not available." % (mac, new_name)) + continue + + cur_name = cur.get('name') if cur_name == new_name: # nothing to do continue @@ -351,13 +448,13 @@ def _rename_interfaces(renames, strict_present=True, strict_busy=True, cur_ops.append(("rename", mac, new_name, (new_name, tmp_name))) target['name'] = tmp_name - cur_byname = update_byname(cur_bymac) + cur_byname = update_byname(cur_info) if target['up']: ups.append(("up", mac, new_name, (tmp_name,))) cur_ops.append(("rename", mac, new_name, (cur['name'], new_name))) cur['name'] = new_name - cur_byname = update_byname(cur_bymac) + cur_byname = update_byname(cur_info) ops += cur_ops opmap = {'rename': rename, 'down': down, 'up': up} @@ -426,6 +523,36 @@ def get_interfaces_by_mac(): return ret +def get_interfaces(): + """Return list of interface tuples (name, mac, driver, device_id) + + Bridges and any devices that have a 'stolen' mac are excluded.""" + try: + devs = get_devicelist() + except OSError as e: + if e.errno == errno.ENOENT: + devs = [] + else: + raise + ret = [] + empty_mac = '00:00:00:00:00:00' + for name in devs: + if not interface_has_own_mac(name): + continue 
+ if is_bridge(name): + continue + if is_vlan(name): + continue + mac = get_interface_mac(name) + # some devices may not have a mac (tun0) + if not mac: + continue + if mac == empty_mac and name != 'lo': + continue + ret.append((name, mac, device_driver(name), device_devid(name))) + return ret + + class RendererNotFoundError(RuntimeError): pass diff --git a/cloudinit/net/eni.py b/cloudinit/net/eni.py index 98ce01e4..b707146c 100644 --- a/cloudinit/net/eni.py +++ b/cloudinit/net/eni.py @@ -72,6 +72,8 @@ def _iface_add_attrs(iface, index): content = [] ignore_map = [ 'control', + 'device_id', + 'driver', 'index', 'inet', 'mode', diff --git a/cloudinit/net/renderer.py b/cloudinit/net/renderer.py index c68658dc..bba139e5 100644 --- a/cloudinit/net/renderer.py +++ b/cloudinit/net/renderer.py @@ -34,8 +34,10 @@ class Renderer(object): for iface in network_state.iter_interfaces(filter_by_physical): # for physical interfaces write out a persist net udev rule if 'name' in iface and iface.get('mac_address'): + driver = iface.get('driver', None) content.write(generate_udev_rule(iface['name'], - iface['mac_address'])) + iface['mac_address'], + driver=driver)) return content.getvalue() @abc.abstractmethod diff --git a/cloudinit/net/udev.py b/cloudinit/net/udev.py index fd2fd8c7..58c0a708 100644 --- a/cloudinit/net/udev.py +++ b/cloudinit/net/udev.py @@ -23,7 +23,7 @@ def compose_udev_setting(key, value): return '%s="%s"' % (key, value) -def generate_udev_rule(interface, mac): +def generate_udev_rule(interface, mac, driver=None): """Return a udev rule to set the name of network interface with `mac`. 
The rule ends up as a single line looking something like: @@ -31,10 +31,13 @@ def generate_udev_rule(interface, mac): SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}="ff:ee:dd:cc:bb:aa", NAME="eth0" """ + if not driver: + driver = '?*' + rule = ', '.join([ compose_udev_equality('SUBSYSTEM', 'net'), compose_udev_equality('ACTION', 'add'), - compose_udev_equality('DRIVERS', '?*'), + compose_udev_equality('DRIVERS', driver), compose_udev_attr_equality('address', mac), compose_udev_setting('NAME', interface), ]) diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 4fe0d635..b5a95a1f 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -16,6 +16,7 @@ from xml.dom import minidom import xml.etree.ElementTree as ET from cloudinit import log as logging +from cloudinit import net from cloudinit import sources from cloudinit.sources.helpers.azure import get_metadata_from_fabric from cloudinit import util @@ -245,7 +246,9 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'): set_hostname(previous_hostname, hostname_command) -class DataSourceAzureNet(sources.DataSource): +class DataSourceAzure(sources.DataSource): + _negotiated = False + def __init__(self, sys_cfg, distro, paths): sources.DataSource.__init__(self, sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, 'azure') @@ -255,6 +258,7 @@ class DataSourceAzureNet(sources.DataSource): util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}), BUILTIN_DS_CONFIG]) self.dhclient_lease_file = self.ds_cfg.get('dhclient_lease_file') + self._network_config = None def __str__(self): root = sources.DataSource.__str__(self) @@ -331,6 +335,7 @@ class DataSourceAzureNet(sources.DataSource): if asset_tag != AZURE_CHASSIS_ASSET_TAG: LOG.debug("Non-Azure DMI asset tag '%s' discovered.", asset_tag) return False + ddir = self.ds_cfg['data_dir'] candidates = [self.seed_dir] @@ -375,13 +380,14 @@ class 
DataSourceAzureNet(sources.DataSource): LOG.debug("using files cached in %s", ddir) # azure / hyper-v provides random data here + # TODO. find the seed on FreeBSD platform + # now update ds_cfg to reflect contents pass in config if not util.is_FreeBSD(): seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True, decode=False) if seed: self.metadata['random_seed'] = seed - # TODO. find the seed on FreeBSD platform - # now update ds_cfg to reflect contents pass in config + user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {}) self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg]) @@ -389,6 +395,40 @@ class DataSourceAzureNet(sources.DataSource): # the directory to be protected. write_files(ddir, files, dirmode=0o700) + self.metadata['instance-id'] = util.read_dmi_data('system-uuid') + + return True + + def device_name_to_device(self, name): + return self.ds_cfg['disk_aliases'].get(name) + + def get_config_obj(self): + return self.cfg + + def check_instance_id(self, sys_cfg): + # quickly (local check only) if self.instance_id is still valid + return sources.instance_id_matches_system_uuid(self.get_instance_id()) + + def setup(self, is_new_instance): + if self._negotiated is False: + LOG.debug("negotiating for %s (new_instance=%s)", + self.get_instance_id(), is_new_instance) + fabric_data = self._negotiate() + LOG.debug("negotiating returned %s", fabric_data) + if fabric_data: + self.metadata.update(fabric_data) + self._negotiated = True + else: + LOG.debug("negotiating already done for %s", + self.get_instance_id()) + + def _negotiate(self): + """Negotiate with fabric and return data from it. + + On success, returns a dictionary including 'public_keys'. + On failure, returns False. 
+ """ + if self.ds_cfg['agent_command'] == AGENT_START_BUILTIN: self.bounce_network_with_azure_hostname() @@ -398,31 +438,64 @@ class DataSourceAzureNet(sources.DataSource): else: metadata_func = self.get_metadata_from_agent + LOG.debug("negotiating with fabric via agent command %s", + self.ds_cfg['agent_command']) try: fabric_data = metadata_func() except Exception as exc: - LOG.info("Error communicating with Azure fabric; assume we aren't" - " on Azure.", exc_info=True) + LOG.warning( + "Error communicating with Azure fabric; You may experience." + "connectivity issues.", exc_info=True) return False - self.metadata['instance-id'] = util.read_dmi_data('system-uuid') - self.metadata.update(fabric_data) - - return True - def device_name_to_device(self, name): - return self.ds_cfg['disk_aliases'].get(name) - - def get_config_obj(self): - return self.cfg - - def check_instance_id(self, sys_cfg): - # quickly (local check only) if self.instance_id is still valid - return sources.instance_id_matches_system_uuid(self.get_instance_id()) + return fabric_data def activate(self, cfg, is_new_instance): address_ephemeral_resize(is_new_instance=is_new_instance) return + @property + def network_config(self): + """Generate a network config like net.generate_fallback_network() with + the following execptions. + + 1. Probe the drivers of the net-devices present and inject them in + the network configuration under params: driver: value + 2. If the driver value is 'mlx4_core', the control mode should be + set to manual. 
The device will be later used to build a bond, + for now we want to ensure the device gets named but does not + break any network configuration + """ + blacklist = ['mlx4_core'] + if not self._network_config: + LOG.debug('Azure: generating fallback configuration') + # generate a network config, blacklist picking any mlx4_core devs + netconfig = net.generate_fallback_config( + blacklist_drivers=blacklist, config_driver=True) + + # if we have any blacklisted devices, update the network_config to + # include the device, mac, and driver values, but with no ip + # config; this ensures udev rules are generated but won't affect + # ip configuration + bl_found = 0 + for bl_dev in [dev for dev in net.get_devicelist() + if net.device_driver(dev) in blacklist]: + bl_found += 1 + cfg = { + 'type': 'physical', + 'name': 'vf%d' % bl_found, + 'mac_address': net.get_interface_mac(bl_dev), + 'params': { + 'driver': net.device_driver(bl_dev), + 'device_id': net.device_devid(bl_dev), + }, + } + netconfig['config'].append(cfg) + + self._network_config = netconfig + + return self._network_config + def _partitions_on_device(devpath, maxnum=16): # return a list of tuples (ptnum, path) for each part on devpath @@ -849,9 +922,12 @@ class NonAzureDataSource(Exception): pass +# Legacy: Must be present in case we load an old pkl object +DataSourceAzureNet = DataSourceAzure + # Used to match classes to dependencies datasources = [ - (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), + (DataSourceAzure, (sources.DEP_FILESYSTEM, )), ] diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index c3ce36d6..952caf35 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -251,10 +251,23 @@ class DataSource(object): def first_instance_boot(self): return + def setup(self, is_new_instance): + """setup(is_new_instance) + + This is called before user-data and vendor-data have been processed. 
+ + Unless the datasource has set mode to 'local', then networking + per 'fallback' or per 'network_config' will have been written and + brought up the OS at this point. + """ + return + def activate(self, cfg, is_new_instance): """activate(cfg, is_new_instance) - This is called before the init_modules will be called. + This is called before the init_modules will be called but after + the user-data and vendor-data have been fully processed. + The cfg is fully up to date config, it contains a merged view of system config, datasource config, user config, vendor config. It should be used rather than the sys_cfg passed to __init__. diff --git a/cloudinit/stages.py b/cloudinit/stages.py index ad557827..a1c4a517 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -362,6 +362,11 @@ class Init(object): self._store_userdata() self._store_vendordata() + def setup_datasource(self): + if self.datasource is None: + raise RuntimeError("Datasource is None, cannot setup.") + self.datasource.setup(is_new_instance=self.is_new_instance()) + def activate_datasource(self): if self.datasource is None: raise RuntimeError("Datasource is None, cannot activate.") diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 7d33daf7..20e70fb7 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -181,13 +181,19 @@ scbus-1 on xpt0 bus 0 side_effect=_dmi_mocks)), ]) - dsrc = dsaz.DataSourceAzureNet( + dsrc = dsaz.DataSourceAzure( data.get('sys_cfg', {}), distro=None, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command return dsrc + def _get_and_setup(self, dsrc): + ret = dsrc.get_data() + if ret: + dsrc.setup(True) + return ret + def xml_equals(self, oxml, nxml): """Compare two sets of XML to make sure they are equal""" @@ -259,7 +265,7 @@ fdescfs /dev/fd fdescfs rw 0 0 # Return a non-matching asset tag value nonazure_tag = 
dsaz.AZURE_CHASSIS_ASSET_TAG + 'X' m_read_dmi_data.return_value = nonazure_tag - dsrc = dsaz.DataSourceAzureNet( + dsrc = dsaz.DataSourceAzure( {}, distro=None, paths=self.paths) self.assertFalse(dsrc.get_data()) self.assertEqual( @@ -299,7 +305,7 @@ fdescfs /dev/fd fdescfs rw 0 0 data = {'ovfcontent': construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) - ret = dsrc.get_data() + ret = self._get_and_setup(dsrc) self.assertTrue(ret) self.assertEqual(data['agent_invoked'], cfg['agent_command']) @@ -312,7 +318,7 @@ fdescfs /dev/fd fdescfs rw 0 0 data = {'ovfcontent': construct_valid_ovf_env(data=odata)} dsrc = self._get_ds(data) - ret = dsrc.get_data() + ret = self._get_and_setup(dsrc) self.assertTrue(ret) self.assertEqual(data['agent_invoked'], cfg['agent_command']) @@ -322,7 +328,7 @@ fdescfs /dev/fd fdescfs rw 0 0 'sys_cfg': sys_cfg} dsrc = self._get_ds(data) - ret = dsrc.get_data() + ret = self._get_and_setup(dsrc) self.assertTrue(ret) self.assertEqual(data['agent_invoked'], '_COMMAND') @@ -394,7 +400,7 @@ fdescfs /dev/fd fdescfs rw 0 0 pubkeys=pubkeys)} dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) - ret = dsrc.get_data() + ret = self._get_and_setup(dsrc) self.assertTrue(ret) for mypk in mypklist: self.assertIn(mypk, dsrc.cfg['_pubkeys']) @@ -409,7 +415,7 @@ fdescfs /dev/fd fdescfs rw 0 0 pubkeys=pubkeys)} dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) - ret = dsrc.get_data() + ret = self._get_and_setup(dsrc) self.assertTrue(ret) for mypk in mypklist: @@ -425,7 +431,7 @@ fdescfs /dev/fd fdescfs rw 0 0 pubkeys=pubkeys)} dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) - ret = dsrc.get_data() + ret = self._get_and_setup(dsrc) self.assertTrue(ret) for mypk in mypklist: @@ -519,18 +525,20 @@ fdescfs /dev/fd fdescfs rw 0 0 dsrc.get_data() def test_exception_fetching_fabric_data_doesnt_propagate(self): - ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - ds.ds_cfg['agent_command'] = '__builtin__' + 
"""Errors communicating with fabric should warn, but return True.""" + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + dsrc.ds_cfg['agent_command'] = '__builtin__' self.get_metadata_from_fabric.side_effect = Exception - self.assertFalse(ds.get_data()) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) def test_fabric_data_included_in_metadata(self): - ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) - ds.ds_cfg['agent_command'] = '__builtin__' + dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) + dsrc.ds_cfg['agent_command'] = '__builtin__' self.get_metadata_from_fabric.return_value = {'test': 'value'} - ret = ds.get_data() + ret = self._get_and_setup(dsrc) self.assertTrue(ret) - self.assertEqual('value', ds.metadata['test']) + self.assertEqual('value', dsrc.metadata['test']) def test_instance_id_from_dmidecode_used(self): ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()}) @@ -554,6 +562,84 @@ fdescfs /dev/fd fdescfs rw 0 0 self.assertEqual( [mock.call("/dev/cd0")], m_check_fbsd_cdrom.call_args_list) + @mock.patch('cloudinit.net.get_interface_mac') + @mock.patch('cloudinit.net.get_devicelist') + @mock.patch('cloudinit.net.device_driver') + @mock.patch('cloudinit.net.generate_fallback_config') + def test_network_config(self, mock_fallback, mock_dd, + mock_devlist, mock_get_mac): + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + fallback_config = { + 'version': 1, + 'config': [{ + 'type': 'physical', 'name': 'eth0', + 'mac_address': '00:11:22:33:44:55', + 'params': {'driver': 'hv_netsvc'}, + 'subnets': [{'type': 'dhcp'}], + }] + } + mock_fallback.return_value = fallback_config + + mock_devlist.return_value = ['eth0'] + mock_dd.return_value = ['hv_netsvc'] + mock_get_mac.return_value = '00:11:22:33:44:55' + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + + netconfig = dsrc.network_config + 
self.assertEqual(netconfig, fallback_config) + mock_fallback.assert_called_with(blacklist_drivers=['mlx4_core'], + config_driver=True) + + @mock.patch('cloudinit.net.get_interface_mac') + @mock.patch('cloudinit.net.get_devicelist') + @mock.patch('cloudinit.net.device_driver') + @mock.patch('cloudinit.net.generate_fallback_config') + def test_network_config_blacklist(self, mock_fallback, mock_dd, + mock_devlist, mock_get_mac): + odata = {'HostName': "myhost", 'UserName': "myuser"} + data = {'ovfcontent': construct_valid_ovf_env(data=odata), + 'sys_cfg': {}} + + fallback_config = { + 'version': 1, + 'config': [{ + 'type': 'physical', 'name': 'eth0', + 'mac_address': '00:11:22:33:44:55', + 'params': {'driver': 'hv_netsvc'}, + 'subnets': [{'type': 'dhcp'}], + }] + } + blacklist_config = { + 'type': 'physical', + 'name': 'eth1', + 'mac_address': '00:11:22:33:44:55', + 'params': {'driver': 'mlx4_core'} + } + mock_fallback.return_value = fallback_config + + mock_devlist.return_value = ['eth0', 'eth1'] + mock_dd.side_effect = [ + 'hv_netsvc', # list composition, skipped + 'mlx4_core', # list composition, match + 'mlx4_core', # config get driver name + ] + mock_get_mac.return_value = '00:11:22:33:44:55' + + dsrc = self._get_ds(data) + ret = dsrc.get_data() + self.assertTrue(ret) + + netconfig = dsrc.network_config + expected_config = fallback_config + expected_config['config'].append(blacklist_config) + self.assertEqual(netconfig, expected_config) + class TestAzureBounce(TestCase): @@ -603,12 +689,18 @@ class TestAzureBounce(TestCase): if ovfcontent is not None: populate_dir(os.path.join(self.paths.seed_dir, "azure"), {'ovf-env.xml': ovfcontent}) - dsrc = dsaz.DataSourceAzureNet( + dsrc = dsaz.DataSourceAzure( {}, distro=None, paths=self.paths) if agent_command is not None: dsrc.ds_cfg['agent_command'] = agent_command return dsrc + def _get_and_setup(self, dsrc): + ret = dsrc.get_data() + if ret: + dsrc.setup(True) + return ret + def get_ovf_env_with_dscfg(self, hostname, 
cfg): odata = { 'HostName': hostname, @@ -652,17 +744,20 @@ class TestAzureBounce(TestCase): host_name = 'unchanged-host-name' self.get_hostname.return_value = host_name cfg = {'hostname_bounce': {'policy': 'force'}} - self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), - agent_command=['not', '__builtin__']).get_data() + dsrc = self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg), + agent_command=['not', '__builtin__']) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) self.assertEqual(1, perform_hostname_bounce.call_count) def test_different_hostnames_sets_hostname(self): expected_hostname = 'azure-expected-host-name' self.get_hostname.return_value = 'default-host-name' - self._get_ds( + dsrc = self._get_ds( self.get_ovf_env_with_dscfg(expected_hostname, {}), - agent_command=['not', '__builtin__'], - ).get_data() + agent_command=['not', '__builtin__']) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) self.assertEqual(expected_hostname, self.set_hostname.call_args_list[0][0][0]) @@ -671,19 +766,21 @@ class TestAzureBounce(TestCase): self, perform_hostname_bounce): expected_hostname = 'azure-expected-host-name' self.get_hostname.return_value = 'default-host-name' - self._get_ds( + dsrc = self._get_ds( self.get_ovf_env_with_dscfg(expected_hostname, {}), - agent_command=['not', '__builtin__'], - ).get_data() + agent_command=['not', '__builtin__']) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) self.assertEqual(1, perform_hostname_bounce.call_count) def test_different_hostnames_sets_hostname_back(self): initial_host_name = 'default-host-name' self.get_hostname.return_value = initial_host_name - self._get_ds( + dsrc = self._get_ds( self.get_ovf_env_with_dscfg('some-host-name', {}), - agent_command=['not', '__builtin__'], - ).get_data() + agent_command=['not', '__builtin__']) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) self.assertEqual(initial_host_name, self.set_hostname.call_args_list[-1][0][0]) @@ -693,10 +790,11 @@ 
class TestAzureBounce(TestCase): perform_hostname_bounce.side_effect = Exception initial_host_name = 'default-host-name' self.get_hostname.return_value = initial_host_name - self._get_ds( + dsrc = self._get_ds( self.get_ovf_env_with_dscfg('some-host-name', {}), - agent_command=['not', '__builtin__'], - ).get_data() + agent_command=['not', '__builtin__']) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) self.assertEqual(initial_host_name, self.set_hostname.call_args_list[-1][0][0]) @@ -707,7 +805,9 @@ class TestAzureBounce(TestCase): self.get_hostname.return_value = old_hostname cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}} data = self.get_ovf_env_with_dscfg(hostname, cfg) - self._get_ds(data, agent_command=['not', '__builtin__']).get_data() + dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) self.assertEqual(1, self.subp.call_count) bounce_env = self.subp.call_args[1]['env'] self.assertEqual(interface, bounce_env['interface']) @@ -719,7 +819,9 @@ class TestAzureBounce(TestCase): dsaz.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd cfg = {'hostname_bounce': {'policy': 'force'}} data = self.get_ovf_env_with_dscfg('some-hostname', cfg) - self._get_ds(data, agent_command=['not', '__builtin__']).get_data() + dsrc = self._get_ds(data, agent_command=['not', '__builtin__']) + ret = self._get_and_setup(dsrc) + self.assertTrue(ret) self.assertEqual(1, self.subp.call_count) bounce_args = self.subp.call_args[1]['args'] self.assertEqual(cmd, bounce_args) @@ -975,4 +1077,12 @@ class TestCanDevBeReformatted(CiTestCase): self.assertEqual(False, value) self.assertIn("3 or more", msg.lower()) + +class TestAzureNetExists(CiTestCase): + def test_azure_net_must_exist_for_legacy_objpkl(self): + """DataSourceAzureNet must exist for old obj.pkl files + that reference it.""" + self.assertTrue(hasattr(dsaz, "DataSourceAzureNet")) + + # vi: ts=4 expandtab diff --git 
a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 7649b9ae..2ff1d9df 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -26,6 +26,7 @@ from cloudinit.sources import DataSourceNone as DSNone from .. import helpers as test_helpers DEFAULT_LOCAL = [ + Azure.DataSourceAzure, CloudSigma.DataSourceCloudSigma, ConfigDrive.DataSourceConfigDrive, DigitalOcean.DataSourceDigitalOcean, @@ -38,7 +39,6 @@ DEFAULT_LOCAL = [ DEFAULT_NETWORK = [ AliYun.DataSourceAliYun, AltCloud.DataSourceAltCloud, - Azure.DataSourceAzureNet, Bigstep.DataSourceBigstep, CloudStack.DataSourceCloudStack, DSNone.DataSourceNone, diff --git a/tests/unittests/test_net.py b/tests/unittests/test_net.py index 8edc0b89..06e8f094 100644 --- a/tests/unittests/test_net.py +++ b/tests/unittests/test_net.py @@ -836,38 +836,176 @@ CONFIG_V1_EXPLICIT_LOOPBACK = { 'subnets': [{'control': 'auto', 'type': 'loopback'}]}, ]} +DEFAULT_DEV_ATTRS = { + 'eth1000': { + "bridge": False, + "carrier": False, + "dormant": False, + "operstate": "down", + "address": "07-1C-C6-75-A4-BE", + "device/driver": None, + "device/device": None, + } +} + def _setup_test(tmp_dir, mock_get_devicelist, mock_read_sys_net, - mock_sys_dev_path): - mock_get_devicelist.return_value = ['eth1000'] - dev_characteristics = { - 'eth1000': { - "bridge": False, - "carrier": False, - "dormant": False, - "operstate": "down", - "address": "07-1C-C6-75-A4-BE", - } - } + mock_sys_dev_path, dev_attrs=None): + if not dev_attrs: + dev_attrs = DEFAULT_DEV_ATTRS + + mock_get_devicelist.return_value = dev_attrs.keys() def fake_read(devname, path, translate=None, on_enoent=None, on_keyerror=None, on_einval=None): - return dev_characteristics[devname][path] + return dev_attrs[devname][path] mock_read_sys_net.side_effect = fake_read def sys_dev_path(devname, path=""): - return tmp_dir + devname + "/" + path + return tmp_dir + "/" + devname + "/" + path 
- for dev in dev_characteristics: + for dev in dev_attrs: os.makedirs(os.path.join(tmp_dir, dev)) with open(os.path.join(tmp_dir, dev, 'operstate'), 'w') as fh: - fh.write("down") + fh.write(dev_attrs[dev]['operstate']) + os.makedirs(os.path.join(tmp_dir, dev, "device")) + for key in ['device/driver']: + if key in dev_attrs[dev] and dev_attrs[dev][key]: + target = dev_attrs[dev][key] + link = os.path.join(tmp_dir, dev, key) + print('symlink %s -> %s' % (link, target)) + os.symlink(target, link) mock_sys_dev_path.side_effect = sys_dev_path +class TestGenerateFallbackConfig(CiTestCase): + + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_device_driver(self, mock_get_devicelist, mock_read_sys_net, + mock_sys_dev_path): + devices = { + 'eth0': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'hv_netsvc', 'device/device': '0x3'}, + 'eth1': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'mlx4_core', 'device/device': '0x7'}, + } + + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path, + dev_attrs=devices) + + network_cfg = net.generate_fallback_config(config_driver=True) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + + render_dir = os.path.join(tmp_dir, "render") + os.makedirs(render_dir) + + # don't set rulepath so eni writes them + renderer = eni.Renderer( + {'eni_path': 'interfaces', 'netrules_path': 'netrules'}) + renderer.render_network_state(ns, render_dir) + + self.assertTrue(os.path.exists(os.path.join(render_dir, + 'interfaces'))) + with open(os.path.join(render_dir, 'interfaces')) as fh: + contents = fh.read() + print(contents) + expected = """ +auto lo +iface lo inet loopback + +auto eth0 +iface eth0 inet dhcp +""" + 
self.assertEqual(expected.lstrip(), contents.lstrip()) + + self.assertTrue(os.path.exists(os.path.join(render_dir, 'netrules'))) + with open(os.path.join(render_dir, 'netrules')) as fh: + contents = fh.read() + print(contents) + expected_rule = [ + 'SUBSYSTEM=="net"', + 'ACTION=="add"', + 'DRIVERS=="hv_netsvc"', + 'ATTR{address}=="00:11:22:33:44:55"', + 'NAME="eth0"', + ] + self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip()) + + @mock.patch("cloudinit.net.sys_dev_path") + @mock.patch("cloudinit.net.read_sys_net") + @mock.patch("cloudinit.net.get_devicelist") + def test_device_driver_blacklist(self, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path): + devices = { + 'eth1': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'hv_netsvc', 'device/device': '0x3'}, + 'eth0': { + 'bridge': False, 'carrier': False, 'dormant': False, + 'operstate': 'down', 'address': '00:11:22:33:44:55', + 'device/driver': 'mlx4_core', 'device/device': '0x7'}, + } + + tmp_dir = self.tmp_dir() + _setup_test(tmp_dir, mock_get_devicelist, + mock_read_sys_net, mock_sys_dev_path, + dev_attrs=devices) + + blacklist = ['mlx4_core'] + network_cfg = net.generate_fallback_config(blacklist_drivers=blacklist, + config_driver=True) + ns = network_state.parse_net_config_data(network_cfg, + skip_broken=False) + + render_dir = os.path.join(tmp_dir, "render") + os.makedirs(render_dir) + + # don't set rulepath so eni writes them + renderer = eni.Renderer( + {'eni_path': 'interfaces', 'netrules_path': 'netrules'}) + renderer.render_network_state(ns, render_dir) + + self.assertTrue(os.path.exists(os.path.join(render_dir, + 'interfaces'))) + with open(os.path.join(render_dir, 'interfaces')) as fh: + contents = fh.read() + print(contents) + expected = """ +auto lo +iface lo inet loopback + +auto eth1 +iface eth1 inet dhcp +""" + self.assertEqual(expected.lstrip(), contents.lstrip()) + + 
self.assertTrue(os.path.exists(os.path.join(render_dir, 'netrules'))) + with open(os.path.join(render_dir, 'netrules')) as fh: + contents = fh.read() + print(contents) + expected_rule = [ + 'SUBSYSTEM=="net"', + 'ACTION=="add"', + 'DRIVERS=="hv_netsvc"', + 'ATTR{address}=="00:11:22:33:44:55"', + 'NAME="eth1"', + ] + self.assertEqual(", ".join(expected_rule) + '\n', contents.lstrip()) + + class TestSysConfigRendering(CiTestCase): @mock.patch("cloudinit.net.sys_dev_path") @@ -1560,6 +1698,118 @@ class TestNetRenderers(CiTestCase): priority=['sysconfig', 'eni']) +class TestGetInterfaces(CiTestCase): + _data = {'bonds': ['bond1'], + 'bridges': ['bridge1'], + 'vlans': ['bond1.101'], + 'own_macs': ['enp0s1', 'enp0s2', 'bridge1-nic', 'bridge1', + 'bond1.101', 'lo', 'eth1'], + 'macs': {'enp0s1': 'aa:aa:aa:aa:aa:01', + 'enp0s2': 'aa:aa:aa:aa:aa:02', + 'bond1': 'aa:aa:aa:aa:aa:01', + 'bond1.101': 'aa:aa:aa:aa:aa:01', + 'bridge1': 'aa:aa:aa:aa:aa:03', + 'bridge1-nic': 'aa:aa:aa:aa:aa:03', + 'lo': '00:00:00:00:00:00', + 'greptap0': '00:00:00:00:00:00', + 'eth1': 'aa:aa:aa:aa:aa:01', + 'tun0': None}, + 'drivers': {'enp0s1': 'virtio_net', + 'enp0s2': 'e1000', + 'bond1': None, + 'bond1.101': None, + 'bridge1': None, + 'bridge1-nic': None, + 'lo': None, + 'greptap0': None, + 'eth1': 'mlx4_core', + 'tun0': None}} + data = {} + + def _se_get_devicelist(self): + return list(self.data['devices']) + + def _se_device_driver(self, name): + return self.data['drivers'][name] + + def _se_device_devid(self, name): + return '0x%s' % sorted(list(self.data['drivers'].keys())).index(name) + + def _se_get_interface_mac(self, name): + return self.data['macs'][name] + + def _se_is_bridge(self, name): + return name in self.data['bridges'] + + def _se_is_vlan(self, name): + return name in self.data['vlans'] + + def _se_interface_has_own_mac(self, name): + return name in self.data['own_macs'] + + def _mock_setup(self): + self.data = copy.deepcopy(self._data) + self.data['devices'] = 
set(list(self.data['macs'].keys())) + mocks = ('get_devicelist', 'get_interface_mac', 'is_bridge', + 'interface_has_own_mac', 'is_vlan', 'device_driver', + 'device_devid') + self.mocks = {} + for n in mocks: + m = mock.patch('cloudinit.net.' + n, + side_effect=getattr(self, '_se_' + n)) + self.addCleanup(m.stop) + self.mocks[n] = m.start() + + def test_gi_includes_duplicate_macs(self): + self._mock_setup() + ret = net.get_interfaces() + + self.assertIn('enp0s1', self._se_get_devicelist()) + self.assertIn('eth1', self._se_get_devicelist()) + found = [ent for ent in ret if 'aa:aa:aa:aa:aa:01' in ent] + self.assertEqual(len(found), 2) + + def test_gi_excludes_any_without_mac_address(self): + self._mock_setup() + ret = net.get_interfaces() + + self.assertIn('tun0', self._se_get_devicelist()) + found = [ent for ent in ret if 'tun0' in ent] + self.assertEqual(len(found), 0) + + def test_gi_excludes_stolen_macs(self): + self._mock_setup() + ret = net.get_interfaces() + self.mocks['interface_has_own_mac'].assert_has_calls( + [mock.call('enp0s1'), mock.call('bond1')], any_order=True) + expected = [ + ('enp0s2', 'aa:aa:aa:aa:aa:02', 'e1000', '0x5'), + ('enp0s1', 'aa:aa:aa:aa:aa:01', 'virtio_net', '0x4'), + ('eth1', 'aa:aa:aa:aa:aa:01', 'mlx4_core', '0x6'), + ('lo', '00:00:00:00:00:00', None, '0x8'), + ('bridge1-nic', 'aa:aa:aa:aa:aa:03', None, '0x3'), + ] + self.assertEqual(sorted(expected), sorted(ret)) + + def test_gi_excludes_bridges(self): + self._mock_setup() + # add a device 'b1', make all return they have their "own mac", + # set everything other than 'b1' to be a bridge. + # then expect b1 is the only thing left. 
+ self.data['macs']['b1'] = 'aa:aa:aa:aa:aa:b1' + self.data['drivers']['b1'] = None + self.data['devices'].add('b1') + self.data['bonds'] = [] + self.data['own_macs'] = self.data['devices'] + self.data['bridges'] = [f for f in self.data['devices'] if f != "b1"] + ret = net.get_interfaces() + self.assertEqual([('b1', 'aa:aa:aa:aa:aa:b1', None, '0x0')], ret) + self.mocks['is_bridge'].assert_has_calls( + [mock.call('bridge1'), mock.call('enp0s1'), mock.call('bond1'), + mock.call('b1')], + any_order=True) + + class TestGetInterfacesByMac(CiTestCase): _data = {'bonds': ['bond1'], 'bridges': ['bridge1'], @@ -1691,4 +1941,202 @@ def _gzip_data(data): gzfp.close() return iobuf.getvalue() + +class TestRenameInterfaces(CiTestCase): + + @mock.patch('cloudinit.util.subp') + def test_rename_all(self, mock_subp): + renames = [ + ('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'), + ('00:11:22:33:44:aa', 'interface2', 'virtio_net', '0x5'), + ] + current_info = { + 'ens3': { + 'downable': True, + 'device_id': '0x3', + 'driver': 'virtio_net', + 'mac': '00:11:22:33:44:55', + 'name': 'ens3', + 'up': False}, + 'ens5': { + 'downable': True, + 'device_id': '0x5', + 'driver': 'virtio_net', + 'mac': '00:11:22:33:44:aa', + 'name': 'ens5', + 'up': False}, + } + net._rename_interfaces(renames, current_info=current_info) + print(mock_subp.call_args_list) + mock_subp.assert_has_calls([ + mock.call(['ip', 'link', 'set', 'ens3', 'name', 'interface0'], + capture=True), + mock.call(['ip', 'link', 'set', 'ens5', 'name', 'interface2'], + capture=True), + ]) + + @mock.patch('cloudinit.util.subp') + def test_rename_no_driver_no_device_id(self, mock_subp): + renames = [ + ('00:11:22:33:44:55', 'interface0', None, None), + ('00:11:22:33:44:aa', 'interface1', None, None), + ] + current_info = { + 'eth0': { + 'downable': True, + 'device_id': None, + 'driver': None, + 'mac': '00:11:22:33:44:55', + 'name': 'eth0', + 'up': False}, + 'eth1': { + 'downable': True, + 'device_id': None, + 'driver': None, + 
'mac': '00:11:22:33:44:aa', + 'name': 'eth1', + 'up': False}, + } + net._rename_interfaces(renames, current_info=current_info) + print(mock_subp.call_args_list) + mock_subp.assert_has_calls([ + mock.call(['ip', 'link', 'set', 'eth0', 'name', 'interface0'], + capture=True), + mock.call(['ip', 'link', 'set', 'eth1', 'name', 'interface1'], + capture=True), + ]) + + @mock.patch('cloudinit.util.subp') + def test_rename_all_bounce(self, mock_subp): + renames = [ + ('00:11:22:33:44:55', 'interface0', 'virtio_net', '0x3'), + ('00:11:22:33:44:aa', 'interface2', 'virtio_net', '0x5'), + ] + current_info = { + 'ens3': { + 'downable': True, + 'device_id': '0x3', + 'driver': 'virtio_net', + 'mac': '00:11:22:33:44:55', + 'name': 'ens3', + 'up': True}, + 'ens5': { + 'downable': True, + 'device_id': '0x5', + 'driver': 'virtio_net', + 'mac': '00:11:22:33:44:aa', + 'name': 'ens5', + 'up': True}, + } + net._rename_interfaces(renames, current_info=current_info) + print(mock_subp.call_args_list) + mock_subp.assert_has_calls([ + mock.call(['ip', 'link', 'set', 'ens3', 'down'], capture=True), + mock.call(['ip', 'link', 'set', 'ens3', 'name', 'interface0'], + capture=True), + mock.call(['ip', 'link', 'set', 'ens5', 'down'], capture=True), + mock.call(['ip', 'link', 'set', 'ens5', 'name', 'interface2'], + capture=True), + mock.call(['ip', 'link', 'set', 'interface0', 'up'], capture=True), + mock.call(['ip', 'link', 'set', 'interface2', 'up'], capture=True) + ]) + + @mock.patch('cloudinit.util.subp') + def test_rename_duplicate_macs(self, mock_subp): + renames = [ + ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'), + ('00:11:22:33:44:55', 'vf1', 'mlx4_core', '0x5'), + ] + current_info = { + 'eth0': { + 'downable': True, + 'device_id': '0x3', + 'driver': 'hv_netsvc', + 'mac': '00:11:22:33:44:55', + 'name': 'eth0', + 'up': False}, + 'eth1': { + 'downable': True, + 'device_id': '0x5', + 'driver': 'mlx4_core', + 'mac': '00:11:22:33:44:55', + 'name': 'eth1', + 'up': False}, + } + 
net._rename_interfaces(renames, current_info=current_info) + print(mock_subp.call_args_list) + mock_subp.assert_has_calls([ + mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'], + capture=True), + ]) + + @mock.patch('cloudinit.util.subp') + def test_rename_duplicate_macs_driver_no_devid(self, mock_subp): + renames = [ + ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', None), + ('00:11:22:33:44:55', 'vf1', 'mlx4_core', None), + ] + current_info = { + 'eth0': { + 'downable': True, + 'device_id': '0x3', + 'driver': 'hv_netsvc', + 'mac': '00:11:22:33:44:55', + 'name': 'eth0', + 'up': False}, + 'eth1': { + 'downable': True, + 'device_id': '0x5', + 'driver': 'mlx4_core', + 'mac': '00:11:22:33:44:55', + 'name': 'eth1', + 'up': False}, + } + net._rename_interfaces(renames, current_info=current_info) + print(mock_subp.call_args_list) + mock_subp.assert_has_calls([ + mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'], + capture=True), + ]) + + @mock.patch('cloudinit.util.subp') + def test_rename_multi_mac_dups(self, mock_subp): + renames = [ + ('00:11:22:33:44:55', 'eth0', 'hv_netsvc', '0x3'), + ('00:11:22:33:44:55', 'vf1', 'mlx4_core', '0x5'), + ('00:11:22:33:44:55', 'vf2', 'mlx4_core', '0x7'), + ] + current_info = { + 'eth0': { + 'downable': True, + 'device_id': '0x3', + 'driver': 'hv_netsvc', + 'mac': '00:11:22:33:44:55', + 'name': 'eth0', + 'up': False}, + 'eth1': { + 'downable': True, + 'device_id': '0x5', + 'driver': 'mlx4_core', + 'mac': '00:11:22:33:44:55', + 'name': 'eth1', + 'up': False}, + 'eth2': { + 'downable': True, + 'device_id': '0x7', + 'driver': 'mlx4_core', + 'mac': '00:11:22:33:44:55', + 'name': 'eth2', + 'up': False}, + } + net._rename_interfaces(renames, current_info=current_info) + print(mock_subp.call_args_list) + mock_subp.assert_has_calls([ + mock.call(['ip', 'link', 'set', 'eth1', 'name', 'vf1'], + capture=True), + mock.call(['ip', 'link', 'set', 'eth2', 'name', 'vf2'], + capture=True), + ]) + + # vi: ts=4 expandtab -- cgit v1.2.3 From 
e80517ae6aea49c9ab3bd622a33fee44014f485f Mon Sep 17 00:00:00 2001 From: Julien Castets Date: Tue, 25 Apr 2017 09:06:13 +0000 Subject: Scaleway: add datasource with user and vendor data for Scaleway. Here we add and enable by default a datasource for Scaleway cloud. The datasource quickly exits unless one of three things: a.) 'Scaleway' found as the system vendor b.) 'scaleway' found on the kernel command line. c.) the directory /var/run/scaleway exists (this is currently created by the scaleway initramfs module). One interesting bit of this particular datasource is that it requires the source port of the http request to be < 1024. --- cloudinit/settings.py | 1 + cloudinit/sources/DataSourceScaleway.py | 234 ++++++++++++++++++++ cloudinit/url_helper.py | 10 +- tests/unittests/test_datasource/test_common.py | 2 + tests/unittests/test_datasource/test_scaleway.py | 262 +++++++++++++++++++++++ tools/ds-identify | 18 +- 6 files changed, 524 insertions(+), 3 deletions(-) create mode 100644 cloudinit/sources/DataSourceScaleway.py create mode 100644 tests/unittests/test_datasource/test_scaleway.py (limited to 'cloudinit/sources') diff --git a/cloudinit/settings.py b/cloudinit/settings.py index 0abd8a4a..c120498f 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -35,6 +35,7 @@ CFG_BUILTIN = { 'CloudStack', 'SmartOS', 'Bigstep', + 'Scaleway', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py new file mode 100644 index 00000000..3a8a8e8f --- /dev/null +++ b/cloudinit/sources/DataSourceScaleway.py @@ -0,0 +1,234 @@ +# Author: Julien Castets +# +# This file is part of cloud-init. See LICENSE file for license information. + +# Scaleway API: +# https://developer.scaleway.com/#metadata + +import json +import os +import socket +import time + +import requests + +# pylint fails to import the two modules below. 
+# These are imported via requests.packages rather than urllib3 because: +# a.) the provider of the requests package should ensure that urllib3 +# contained in it is consistent/correct. +# b.) cloud-init does not specifically have a dependency on urllib3 +# +# For future reference, see: +# https://github.com/kennethreitz/requests/pull/2375 +# https://github.com/requests/requests/issues/4104 +# pylint: disable=E0401 +from requests.packages.urllib3.connection import HTTPConnection +from requests.packages.urllib3.poolmanager import PoolManager + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit import url_helper +from cloudinit import util + + +LOG = logging.getLogger(__name__) + +DS_BASE_URL = 'http://169.254.42.42' + +BUILTIN_DS_CONFIG = { + 'metadata_url': DS_BASE_URL + '/conf?format=json', + 'userdata_url': DS_BASE_URL + '/user_data/cloud-init', + 'vendordata_url': DS_BASE_URL + '/vendor_data/cloud-init' +} + +DEF_MD_RETRIES = 5 +DEF_MD_TIMEOUT = 10 + + +def on_scaleway(): + """ + There are three ways to detect if you are on Scaleway: + + * check DMI data: not yet implemented by Scaleway, but the check is made to + be future-proof. + * the initrd created the file /var/run/scaleway. + * "scaleway" is in the kernel cmdline. + """ + vendor_name = util.read_dmi_data('system-manufacturer') + if vendor_name == 'Scaleway': + return True + + if os.path.exists('/var/run/scaleway'): + return True + + cmdline = util.get_cmdline() + if 'scaleway' in cmdline: + return True + + return False + + +class SourceAddressAdapter(requests.adapters.HTTPAdapter): + """ + Adapter for requests to choose the local address to bind to. 
+ """ + def __init__(self, source_address, **kwargs): + self.source_address = source_address + super(SourceAddressAdapter, self).__init__(**kwargs) + + def init_poolmanager(self, connections, maxsize, block=False): + socket_options = HTTPConnection.default_socket_options + [ + (socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + ] + self.poolmanager = PoolManager(num_pools=connections, + maxsize=maxsize, + block=block, + source_address=self.source_address, + socket_options=socket_options) + + +def query_data_api_once(api_address, timeout, requests_session): + """ + Retrieve user data or vendor data. + + Scaleway user/vendor data API returns HTTP/404 if user/vendor data is not + set. + + This function calls `url_helper.readurl` but instead of considering + HTTP/404 as an error that requires a retry, it considers it as empty + user/vendor data. + + Also, be aware the user data/vendor API requires the source port to be + below 1024 to ensure the client is root (since non-root users can't bind + ports below 1024). If requests raises ConnectionError (EADDRINUSE), the + caller should retry to call this function on an other port. + """ + try: + resp = url_helper.readurl( + api_address, + data=None, + timeout=timeout, + # It's the caller's responsability to recall this function in case + # of exception. Don't let url_helper.readurl() retry by itself. + retries=0, + session=requests_session, + # If the error is a HTTP/404 or a ConnectionError, go into raise + # block below. + exception_cb=lambda _, exc: exc.code == 404 or ( + isinstance(exc.cause, requests.exceptions.ConnectionError) + ) + ) + return util.decode_binary(resp.contents) + except url_helper.UrlError as exc: + # Empty user data. + if exc.code == 404: + return None + raise + + +def query_data_api(api_type, api_address, retries, timeout): + """Get user or vendor data. + + Handle the retrying logic in case the source port is used. 
+ + Scaleway metadata service requires the source port of the client to + be a privileged port (<1024). This is done to ensure that only a + privileged user on the system can access the metadata service. + """ + # Query user/vendor data. Try to make a request on the first privileged + # port available. + for port in range(1, max(retries, 2)): + try: + LOG.debug( + 'Trying to get %s data (bind on port %d)...', + api_type, port + ) + requests_session = requests.Session() + requests_session.mount( + 'http://', + SourceAddressAdapter(source_address=('0.0.0.0', port)) + ) + data = query_data_api_once( + api_address, + timeout=timeout, + requests_session=requests_session + ) + LOG.debug('%s-data downloaded', api_type) + return data + + except url_helper.UrlError as exc: + # Local port already in use or HTTP/429. + LOG.warning('Error while trying to get %s data: %s', api_type, exc) + time.sleep(5) + last_exc = exc + continue + + # Max number of retries reached. + raise last_exc + + +class DataSourceScaleway(sources.DataSource): + + def __init__(self, sys_cfg, distro, paths): + super(DataSourceScaleway, self).__init__(sys_cfg, distro, paths) + + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, ["datasource", "Scaleway"], {}), + BUILTIN_DS_CONFIG + ]) + + self.metadata_address = self.ds_cfg['metadata_url'] + self.userdata_address = self.ds_cfg['userdata_url'] + self.vendordata_address = self.ds_cfg['vendordata_url'] + + self.retries = int(self.ds_cfg.get('retries', DEF_MD_RETRIES)) + self.timeout = int(self.ds_cfg.get('timeout', DEF_MD_TIMEOUT)) + + def get_data(self): + if not on_scaleway(): + return False + + resp = url_helper.readurl(self.metadata_address, + timeout=self.timeout, + retries=self.retries) + self.metadata = json.loads(util.decode_binary(resp.contents)) + + self.userdata_raw = query_data_api( + 'user-data', self.userdata_address, + self.retries, self.timeout + ) + self.vendordata_raw = query_data_api( + 'vendor-data', 
self.vendordata_address, + self.retries, self.timeout + ) + return True + + @property + def launch_index(self): + return None + + def get_instance_id(self): + return self.metadata['id'] + + def get_public_ssh_keys(self): + return [key['key'] for key in self.metadata['ssh_public_keys']] + + def get_hostname(self, fqdn=False, resolve_ip=False): + return self.metadata['hostname'] + + @property + def availability_zone(self): + return None + + @property + def region(self): + return None + + +datasources = [ + (DataSourceScaleway, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)), +] + + +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index d2b92e6a..7cf76aae 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -172,7 +172,8 @@ def _get_ssl_args(url, ssl_details): def readurl(url, data=None, timeout=None, retries=0, sec_between=1, headers=None, headers_cb=None, ssl_details=None, - check_status=True, allow_redirects=True, exception_cb=None): + check_status=True, allow_redirects=True, exception_cb=None, + session=None): url = _cleanurl(url) req_args = { 'url': url, @@ -231,7 +232,12 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, LOG.debug("[%s/%s] open '%s' with %s configuration", i, manual_tries, url, filtered_req_args) - r = requests.request(**req_args) + if session is None: + session = requests.Session() + + with session as sess: + r = sess.request(**req_args) + if check_status: r.raise_for_status() LOG.debug("Read from %s (%s, %sb) after %s attempts", url, diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 2ff1d9df..413e87ac 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -19,6 +19,7 @@ from cloudinit.sources import ( DataSourceOpenNebula as OpenNebula, DataSourceOpenStack as OpenStack, 
DataSourceOVF as OVF, + DataSourceScaleway as Scaleway, DataSourceSmartOS as SmartOS, ) from cloudinit.sources import DataSourceNone as DSNone @@ -48,6 +49,7 @@ DEFAULT_NETWORK = [ NoCloud.DataSourceNoCloudNet, OpenStack.DataSourceOpenStack, OVF.DataSourceOVFNet, + Scaleway.DataSourceScaleway, ] diff --git a/tests/unittests/test_datasource/test_scaleway.py b/tests/unittests/test_datasource/test_scaleway.py new file mode 100644 index 00000000..65d83ad7 --- /dev/null +++ b/tests/unittests/test_datasource/test_scaleway.py @@ -0,0 +1,262 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import json + +import httpretty +import requests + +from cloudinit import helpers +from cloudinit import settings +from cloudinit.sources import DataSourceScaleway + +from ..helpers import mock, HttprettyTestCase, TestCase + + +class DataResponses(object): + """ + Possible responses of the API endpoint + 169.254.42.42/user_data/cloud-init and + 169.254.42.42/vendor_data/cloud-init. + """ + + FAKE_USER_DATA = '#!/bin/bash\necho "user-data"' + + @staticmethod + def rate_limited(method, uri, headers): + return 429, headers, '' + + @staticmethod + def api_error(method, uri, headers): + return 500, headers, '' + + @classmethod + def get_ok(cls, method, uri, headers): + return 200, headers, cls.FAKE_USER_DATA + + @staticmethod + def empty(method, uri, headers): + """ + No user data for this server. + """ + return 404, headers, '' + + +class MetadataResponses(object): + """ + Possible responses of the metadata API. + """ + + FAKE_METADATA = { + 'id': '00000000-0000-0000-0000-000000000000', + 'hostname': 'scaleway.host', + 'ssh_public_keys': [{ + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABA', + 'fingerprint': '2048 06:ae:... login (RSA)' + }, { + 'key': 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABCCCCC', + 'fingerprint': '2048 06:ff:... 
login2 (RSA)' + }] + } + + @classmethod + def get_ok(cls, method, uri, headers): + return 200, headers, json.dumps(cls.FAKE_METADATA) + + +class TestOnScaleway(TestCase): + + def install_mocks(self, fake_dmi, fake_file_exists, fake_cmdline): + mock, faked = fake_dmi + mock.return_value = 'Scaleway' if faked else 'Whatever' + + mock, faked = fake_file_exists + mock.return_value = faked + + mock, faked = fake_cmdline + mock.return_value = \ + 'initrd=initrd showopts scaleway nousb' if faked \ + else 'BOOT_IMAGE=/vmlinuz-3.11.0-26-generic' + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.util.read_dmi_data') + def test_not_on_scaleway(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertFalse(DataSourceScaleway.on_scaleway()) + + # When not on Scaleway, get_data() returns False. + datasource = DataSourceScaleway.DataSourceScaleway( + settings.CFG_BUILTIN, None, helpers.Paths({}) + ) + self.assertFalse(datasource.get_data()) + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.util.read_dmi_data') + def test_on_scaleway_dmi(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + """ + dmidecode returns "Scaleway". + """ + # dmidecode returns "Scaleway" + self.install_mocks( + fake_dmi=(m_read_dmi_data, True), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertTrue(DataSourceScaleway.on_scaleway()) + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.util.read_dmi_data') + def test_on_scaleway_var_run_scaleway(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + """ + /var/run/scaleway exists. 
+ """ + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, True), + fake_cmdline=(m_get_cmdline, False) + ) + self.assertTrue(DataSourceScaleway.on_scaleway()) + + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('os.path.exists') + @mock.patch('cloudinit.util.read_dmi_data') + def test_on_scaleway_cmdline(self, m_read_dmi_data, m_file_exists, + m_get_cmdline): + """ + "scaleway" in /proc/cmdline. + """ + self.install_mocks( + fake_dmi=(m_read_dmi_data, False), + fake_file_exists=(m_file_exists, False), + fake_cmdline=(m_get_cmdline, True) + ) + self.assertTrue(DataSourceScaleway.on_scaleway()) + + +def get_source_address_adapter(*args, **kwargs): + """ + Scaleway user/vendor data API requires to be called with a privileged port. + + If the unittests are run as non-root, the user doesn't have the permission + to bind on ports below 1024. + + This function removes the bind on a privileged address, since anyway the + HTTP call is mocked by httpretty. + """ + kwargs.pop('source_address') + return requests.adapters.HTTPAdapter(*args, **kwargs) + + +class TestDataSourceScaleway(HttprettyTestCase): + + def setUp(self): + self.datasource = DataSourceScaleway.DataSourceScaleway( + settings.CFG_BUILTIN, None, helpers.Paths({}) + ) + super(TestDataSourceScaleway, self).setUp() + + self.metadata_url = \ + DataSourceScaleway.BUILTIN_DS_CONFIG['metadata_url'] + self.userdata_url = \ + DataSourceScaleway.BUILTIN_DS_CONFIG['userdata_url'] + self.vendordata_url = \ + DataSourceScaleway.BUILTIN_DS_CONFIG['vendordata_url'] + + @httpretty.activate + @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', + get_source_address_adapter) + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('time.sleep', return_value=None) + def test_metadata_ok(self, sleep, m_get_cmdline): + """ + get_data() returns metadata, user data and vendor data. 
+ """ + m_get_cmdline.return_value = 'scaleway' + + # Make user data API return a valid response + httpretty.register_uri(httpretty.GET, self.metadata_url, + body=MetadataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.userdata_url, + body=DataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.vendordata_url, + body=DataResponses.get_ok) + self.datasource.get_data() + + self.assertEqual(self.datasource.get_instance_id(), + MetadataResponses.FAKE_METADATA['id']) + self.assertEqual(self.datasource.get_public_ssh_keys(), [ + elem['key'] for elem in + MetadataResponses.FAKE_METADATA['ssh_public_keys'] + ]) + self.assertEqual(self.datasource.get_hostname(), + MetadataResponses.FAKE_METADATA['hostname']) + self.assertEqual(self.datasource.get_userdata_raw(), + DataResponses.FAKE_USER_DATA) + self.assertEqual(self.datasource.get_vendordata_raw(), + DataResponses.FAKE_USER_DATA) + self.assertIsNone(self.datasource.availability_zone) + self.assertIsNone(self.datasource.region) + self.assertEqual(sleep.call_count, 0) + + @httpretty.activate + @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', + get_source_address_adapter) + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('time.sleep', return_value=None) + def test_metadata_404(self, sleep, m_get_cmdline): + """ + get_data() returns metadata, but no user data nor vendor data. + """ + m_get_cmdline.return_value = 'scaleway' + + # Make user and vendor data APIs return HTTP/404, which means there is + # no user / vendor data for the server. 
+ httpretty.register_uri(httpretty.GET, self.metadata_url, + body=MetadataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.userdata_url, + body=DataResponses.empty) + httpretty.register_uri(httpretty.GET, self.vendordata_url, + body=DataResponses.empty) + self.datasource.get_data() + self.assertIsNone(self.datasource.get_userdata_raw()) + self.assertIsNone(self.datasource.get_vendordata_raw()) + self.assertEqual(sleep.call_count, 0) + + @httpretty.activate + @mock.patch('cloudinit.sources.DataSourceScaleway.SourceAddressAdapter', + get_source_address_adapter) + @mock.patch('cloudinit.util.get_cmdline') + @mock.patch('time.sleep', return_value=None) + def test_metadata_rate_limit(self, sleep, m_get_cmdline): + """ + get_data() is rate limited two times by the metadata API when fetching + user data. + """ + m_get_cmdline.return_value = 'scaleway' + + httpretty.register_uri(httpretty.GET, self.metadata_url, + body=MetadataResponses.get_ok) + httpretty.register_uri(httpretty.GET, self.vendordata_url, + body=DataResponses.empty) + + httpretty.register_uri( + httpretty.GET, self.userdata_url, + responses=[ + httpretty.Response(body=DataResponses.rate_limited), + httpretty.Response(body=DataResponses.rate_limited), + httpretty.Response(body=DataResponses.get_ok), + ] + ) + self.datasource.get_data() + self.assertEqual(self.datasource.get_userdata_raw(), + DataResponses.FAKE_USER_DATA) + self.assertEqual(sleep.call_count, 2) diff --git a/tools/ds-identify b/tools/ds-identify index 7c8b144b..33bd2991 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -112,7 +112,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. 
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS" +OVF SmartOS Scaleway" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -896,6 +896,22 @@ dscheck_None() { return ${DS_NOT_FOUND} } +dscheck_Scaleway() { + if [ "${DI_DMI_SYS_VENDOR}" = "Scaleway" ]; then + return $DS_FOUND + fi + + case " ${DI_KERNEL_CMDLINE} " in + *\ scaleway\ *) return ${DS_FOUND};; + esac + + if [ -f ${PATH_ROOT}/var/run/scaleway ]; then + return ${DS_FOUND} + fi + + return ${DS_NOT_FOUND} +} + collect_info() { read_virt read_pid1_product_name -- cgit v1.2.3 From ebdbf30c0274f078f7a66f6dc9efc8a22a220757 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 17 Jul 2017 13:48:19 -0400 Subject: tests: Add initial tests for EC2 and improve a docstring. EC2 was the original, but this adds some initial tests for that datasource. Also updates a docstring for an internal method. --- cloudinit/sources/DataSourceEc2.py | 14 +- tests/unittests/test_datasource/test_ec2.py | 202 ++++++++++++++++++++++++++++ 2 files changed, 212 insertions(+), 4 deletions(-) create mode 100644 tests/unittests/test_datasource/test_ec2.py (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index 9e2fdc0a..4ec9592f 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -316,10 +316,16 @@ def identify_platform(): def _collect_platform_data(): - # returns a dictionary with all lower case values: - # uuid: system-uuid from dmi or /sys/hypervisor - # uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' - # serial: dmi 'system-serial-number' (/sys/.../product_serial) + """Returns a dictionary of platform info from dmi or /sys/hypervisor. 
+ + Keys in the dictionary are as follows: + uuid: system-uuid from dmi or /sys/hypervisor + uuid_source: 'hypervisor' (/sys/hypervisor/uuid) or 'dmi' + serial: dmi 'system-serial-number' (/sys/.../product_serial) + + On Ec2 instances experimentation is that product_serial is upper case, + and product_uuid is lower case. This returns lower case values for both. + """ data = {} try: uuid = util.load_file("/sys/hypervisor/uuid").strip() diff --git a/tests/unittests/test_datasource/test_ec2.py b/tests/unittests/test_datasource/test_ec2.py new file mode 100644 index 00000000..12230ae2 --- /dev/null +++ b/tests/unittests/test_datasource/test_ec2.py @@ -0,0 +1,202 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +import httpretty +import mock + +from .. import helpers as test_helpers +from cloudinit import helpers +from cloudinit.sources import DataSourceEc2 as ec2 + + +# collected from api version 2009-04-04/ with +# python3 -c 'import json +# from cloudinit.ec2_utils import get_instance_metadata as gm +# print(json.dumps(gm("2009-04-04"), indent=1, sort_keys=True))' +DEFAULT_METADATA = { + "ami-id": "ami-80861296", + "ami-launch-index": "0", + "ami-manifest-path": "(unknown)", + "block-device-mapping": {"ami": "/dev/sda1", "root": "/dev/sda1"}, + "hostname": "ip-10-0-0-149", + "instance-action": "none", + "instance-id": "i-0052913950685138c", + "instance-type": "t2.micro", + "local-hostname": "ip-10-0-0-149", + "local-ipv4": "10.0.0.149", + "placement": {"availability-zone": "us-east-1b"}, + "profile": "default-hvm", + "public-hostname": "", + "public-ipv4": "107.23.188.247", + "public-keys": {"brickies": ["ssh-rsa AAAAB3Nz....w== brickies"]}, + "reservation-id": "r-00a2c173fb5782a08", + "security-groups": "wide-open" +} + + +def _register_ssh_keys(rfunc, base_url, keys_data): + """handle ssh key inconsistencies. + + public-keys in the ec2 metadata is inconsistently formatted compared + to other entries. 
+ Given keys_data of {name1: pubkey1, name2: pubkey2} + + This registers the following urls: + base_url 0={name1}\n1={name2} # (for each name) + base_url/ 0={name1}\n1={name2} # (for each name) + base_url/0 openssh-key + base_url/0/ openssh-key + base_url/0/openssh-key {pubkey1} + base_url/0/openssh-key/ {pubkey1} + ... + """ + + base_url = base_url.rstrip("/") + odd_index = '\n'.join( + ["{0}={1}".format(n, name) + for n, name in enumerate(sorted(keys_data))]) + + rfunc(base_url, odd_index) + rfunc(base_url + "/", odd_index) + + for n, name in enumerate(sorted(keys_data)): + val = keys_data[name] + if isinstance(val, list): + val = '\n'.join(val) + burl = base_url + "/%s" % n + rfunc(burl, "openssh-key") + rfunc(burl + "/", "openssh-key") + rfunc(burl + "/%s/openssh-key" % name, val) + rfunc(burl + "/%s/openssh-key/" % name, val) + + +def register_mock_metaserver(base_url, data): + """Register with httpretty a ec2 metadata like service serving 'data'. + + If given a dictionary, it will populate urls under base_url for + that dictionary. For example, input of + {"instance-id": "i-abc", "mac": "00:16:3e:00:00:00"} + populates + base_url with 'instance-id\nmac' + base_url/ with 'instance-id\nmac' + base_url/instance-id with i-abc + base_url/mac with 00:16:3e:00:00:00 + In the index, references to lists or dictionaries have a trailing /. 
+ """ + def register_helper(register, base_url, body): + base_url = base_url.rstrip("/") + if isinstance(body, str): + register(base_url, body) + elif isinstance(body, list): + register(base_url, '\n'.join(body) + '\n') + register(base_url + '/', '\n'.join(body) + '\n') + elif isinstance(body, dict): + vals = [] + for k, v in body.items(): + if k == 'public-keys': + _register_ssh_keys( + register, base_url + '/public-keys/', v) + continue + suffix = k.rstrip("/") + if not isinstance(v, (str, list)): + suffix += "/" + vals.append(suffix) + url = base_url + '/' + suffix + register_helper(register, url, v) + register(base_url, '\n'.join(vals) + '\n') + register(base_url + '/', '\n'.join(vals) + '\n') + elif body is None: + register(base_url, 'not found', status_code=404) + + def myreg(*argc, **kwargs): + # print("register_url(%s, %s)" % (argc, kwargs)) + return httpretty.register_uri(httpretty.GET, *argc, **kwargs) + + register_helper(myreg, base_url, data) + + +class TestEc2(test_helpers.HttprettyTestCase): + valid_platform_data = { + 'uuid': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', + 'uuid_source': 'dmi', + 'serial': 'ec212f79-87d1-2f1d-588f-d86dc0fd5412', + } + + def setUp(self): + super(TestEc2, self).setUp() + self.metadata_addr = ec2.DataSourceEc2.metadata_urls[0] + self.api_ver = '2009-04-04' + + @property + def metadata_url(self): + return '/'.join([self.metadata_addr, self.api_ver, 'meta-data', '']) + + @property + def userdata_url(self): + return '/'.join([self.metadata_addr, self.api_ver, 'user-data']) + + def _patch_add_cleanup(self, mpath, *args, **kwargs): + p = mock.patch(mpath, *args, **kwargs) + p.start() + self.addCleanup(p.stop) + + def _setup_ds(self, sys_cfg, platform_data, md, ud=None): + distro = {} + paths = helpers.Paths({}) + if sys_cfg is None: + sys_cfg = {} + ds = ec2.DataSourceEc2(sys_cfg=sys_cfg, distro=distro, paths=paths) + if platform_data is not None: + self._patch_add_cleanup( + 
"cloudinit.sources.DataSourceEc2._collect_platform_data", + return_value=platform_data) + + if md: + register_mock_metaserver(self.metadata_url, md) + register_mock_metaserver(self.userdata_url, ud) + + return ds + + @httpretty.activate + def test_valid_platform_with_strict_true(self): + """Valid platform data should return true with strict_id true.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md=DEFAULT_METADATA) + ret = ds.get_data() + self.assertEqual(True, ret) + + @httpretty.activate + def test_valid_platform_with_strict_false(self): + """Valid platform data should return true with strict_id false.""" + ds = self._setup_ds( + platform_data=self.valid_platform_data, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md=DEFAULT_METADATA) + ret = ds.get_data() + self.assertEqual(True, ret) + + @httpretty.activate + def test_unknown_platform_with_strict_true(self): + """Unknown platform data with strict_id true should return False.""" + uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' + ds = self._setup_ds( + platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, + sys_cfg={'datasource': {'Ec2': {'strict_id': True}}}, + md=DEFAULT_METADATA) + ret = ds.get_data() + self.assertEqual(False, ret) + + @httpretty.activate + def test_unknown_platform_with_strict_false(self): + """Unknown platform data with strict_id false should return True.""" + uuid = 'ab439480-72bf-11d3-91fc-b8aded755F9a' + ds = self._setup_ds( + platform_data={'uuid': uuid, 'uuid_source': 'dmi', 'serial': ''}, + sys_cfg={'datasource': {'Ec2': {'strict_id': False}}}, + md=DEFAULT_METADATA) + ret = ds.get_data() + self.assertEqual(True, ret) + + +# vi: ts=4 expandtab -- cgit v1.2.3