From 644048e31a9509390871a6a5ab49b92a5e6c3b87 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Fri, 9 Feb 2018 09:53:42 -0700 Subject: EC2: Fix get_instance_id called against cached datasource pickle. Fix an issue in EC2 where the datasource.identity had not been initialized before being used when restoring datasource from pickle. This is exposed in upgrade and reboot path. LP: #1748354 --- cloudinit/sources/DataSourceEc2.py | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py index e14553b3..21e9ef84 100644 --- a/cloudinit/sources/DataSourceEc2.py +++ b/cloudinit/sources/DataSourceEc2.py @@ -147,6 +147,12 @@ class DataSourceEc2(sources.DataSource): def get_instance_id(self): if self.cloud_platform == Platforms.AWS: # Prefer the ID from the instance identity document, but fall back + if not getattr(self, 'identity', None): + # If re-using cached datasource, it's get_data run didn't + # setup self.identity. So we need to do that now. + api_version = self.get_metadata_api_version() + self.identity = ec2.get_instance_identity( + api_version, self.metadata_address).get('document', {}) return self.identity.get( 'instanceId', self.metadata['instance-id']) else: -- cgit v1.2.3 From 3814d559c3e973238d819721605c7451e852fe63 Mon Sep 17 00:00:00 2001 From: Sankar Tanguturi Date: Tue, 13 Feb 2018 13:00:53 -0800 Subject: OVF: Fix VMware support for 64-bit platforms. On few 64-bit platforms, the open-vm-tools package is installed at /usr/lib64/. 
The DataSourceOVF is changed to search look there for the 'customization plugin' --- cloudinit/sources/DataSourceOVF.py | 21 ++++++++++++++++----- tests/unittests/test_ds_identify.py | 10 ++++++++++ tools/ds-identify | 3 ++- 3 files changed, 28 insertions(+), 6 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py index 6e62f984..dc914a72 100644 --- a/cloudinit/sources/DataSourceOVF.py +++ b/cloudinit/sources/DataSourceOVF.py @@ -95,11 +95,20 @@ class DataSourceOVF(sources.DataSource): "VMware Customization support") elif not util.get_cfg_option_bool( self.sys_cfg, "disable_vmware_customization", True): - deployPkgPluginPath = search_file("/usr/lib/vmware-tools", - "libdeployPkgPlugin.so") - if not deployPkgPluginPath: - deployPkgPluginPath = search_file("/usr/lib/open-vm-tools", - "libdeployPkgPlugin.so") + + search_paths = ( + "/usr/lib/vmware-tools", "/usr/lib64/vmware-tools", + "/usr/lib/open-vm-tools", "/usr/lib64/open-vm-tools") + + plugin = "libdeployPkgPlugin.so" + deployPkgPluginPath = None + for path in search_paths: + deployPkgPluginPath = search_file(path, plugin) + if deployPkgPluginPath: + LOG.debug("Found the customization plugin at %s", + deployPkgPluginPath) + break + if deployPkgPluginPath: # When the VM is powered on, the "VMware Tools" daemon # copies the customization specification file to @@ -111,6 +120,8 @@ class DataSourceOVF(sources.DataSource): msg="waiting for configuration file", func=wait_for_imc_cfg_file, args=("cust.cfg", max_wait)) + else: + LOG.debug("Did not find the customization plugin.") if vmwareImcConfigFilePath: LOG.debug("Found VMware Customization Config File at %s", diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 21258347..9be3f964 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -337,6 +337,16 @@ class TestDsIdentify(CiTestCase): """OVF is identified when 
vmware customization is enabled.""" self._test_ds_found('OVF-vmware-customization') + def test_ovf_on_vmware_iso_found_open_vm_tools_64(self): + """OVF is identified when open-vm-tools installed in /usr/lib64.""" + cust64 = copy.deepcopy(VALID_CFG['OVF-vmware-customization']) + p32 = 'usr/lib/vmware-tools/plugins/vmsvc/libdeployPkgPlugin.so' + open64 = 'usr/lib64/open-vm-tools/plugins/vmsvc/libdeployPkgPlugin.so' + cust64['files'][open64] = cust64['files'][p32] + del cust64['files'][p32] + return self._check_via_dict( + cust64, RC_FOUND, dslist=[cust64.get('ds'), DS_NONE]) + def test_ovf_on_vmware_iso_found_by_cdrom_with_matching_fs_label(self): """OVF is identified by well-known iso9660 labels.""" ovf_cdrom_by_label = copy.deepcopy(VALID_CFG['OVF']) diff --git a/tools/ds-identify b/tools/ds-identify index 5da51bcc..ec368d58 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -650,8 +650,9 @@ ovf_vmware_guest_customization() { # we have to have the plugin to do vmware customization local found="" pkg="" pre="${PATH_ROOT}/usr/lib" + local ppath="plugins/vmsvc/libdeployPkgPlugin.so" for pkg in vmware-tools open-vm-tools; do - if [ -f "$pre/$pkg/plugins/vmsvc/libdeployPkgPlugin.so" ]; then + if [ -f "$pre/$pkg/$ppath" -o -f "${pre}64/$pkg/$ppath" ]; then found="$pkg"; break; fi done -- cgit v1.2.3 From 40e77380e036a24fafe91a63d0cdefada4312348 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 1 Mar 2018 15:39:16 -0700 Subject: GCE: fix reading of user-data that is not base64 encoded. Last set of changes to GCE datasource broke reading of user-data unless the user had base64 encoded their user-data and also set user-data-encoding to 'base64'. This fixes the issue. 
LP: #1752711 --- cloudinit/sources/DataSourceGCE.py | 15 +++++++-------- tests/unittests/test_datasource/test_gce.py | 20 +++++++++++++++++++- 2 files changed, 26 insertions(+), 9 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index 2da34a99..bebc9918 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -213,16 +213,15 @@ def read_md(address=None, platform_check=True): if md['availability-zone']: md['availability-zone'] = md['availability-zone'].split('/')[-1] - encoding = instance_data.get('user-data-encoding') - if encoding: + if 'user-data' in instance_data: + # instance_data was json, so values are all utf-8 strings. + ud = instance_data['user-data'].encode("utf-8") + encoding = instance_data.get('user-data-encoding') if encoding == 'base64': - md['user-data'] = b64decode(instance_data.get('user-data')) - else: + ud = b64decode(ud) + elif encoding: LOG.warning('unknown user-data-encoding: %s, ignoring', encoding) - - if 'user-data' in md: - ret['user-data'] = md['user-data'] - del md['user-data'] + ret['user-data'] = ud ret['meta-data'] = md ret['success'] = True diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py index f77c2c40..eb3cec42 100644 --- a/tests/unittests/test_datasource/test_gce.py +++ b/tests/unittests/test_datasource/test_gce.py @@ -38,11 +38,20 @@ GCE_META_ENCODING = { 'instance/hostname': 'server.project-baz.local', 'instance/zone': 'baz/bang', 'instance/attributes': { - 'user-data': b64encode(b'/bin/echo baz\n').decode('utf-8'), + 'user-data': b64encode(b'#!/bin/echo baz\n').decode('utf-8'), 'user-data-encoding': 'base64', } } +GCE_USER_DATA_TEXT = { + 'instance/id': '12345', + 'instance/hostname': 'server.project-baz.local', + 'instance/zone': 'baz/bang', + 'instance/attributes': { + 'user-data': '#!/bin/sh\necho hi mom\ntouch /run/up-now\n', + } +} + HEADERS = 
{'Metadata-Flavor': 'Google'} MD_URL_RE = re.compile( r'http://metadata.google.internal/computeMetadata/v1/.*') @@ -135,7 +144,16 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase): shostname = GCE_META_PARTIAL.get('instance/hostname').split('.')[0] self.assertEqual(shostname, self.ds.get_hostname()) + def test_userdata_no_encoding(self): + """check that user-data is read.""" + _set_mock_metadata(GCE_USER_DATA_TEXT) + self.ds.get_data() + self.assertEqual( + GCE_USER_DATA_TEXT['instance/attributes']['user-data'].encode(), + self.ds.get_userdata_raw()) + def test_metadata_encoding(self): + """user-data is base64 encoded if user-data-encoding is 'base64'.""" _set_mock_metadata(GCE_META_ENCODING) self.ds.get_data() -- cgit v1.2.3 From ffc6917aa0b97811c1e8503cd4cff9f11c15def1 Mon Sep 17 00:00:00 2001 From: Rémy Léone Date: Thu, 1 Mar 2018 18:23:32 +0100 Subject: Change some list creation and population to literal. This will provide a small performance improvement and shorter code. --- cloudinit/cmd/main.py | 10 ++++------ cloudinit/config/cc_keys_to_console.py | 4 +--- cloudinit/config/cc_ssh_authkey_fingerprints.py | 9 ++++----- cloudinit/distros/arch.py | 5 +---- cloudinit/distros/opensuse.py | 5 ++--- cloudinit/sources/DataSourceOpenNebula.py | 5 +---- cloudinit/stages.py | 3 +-- cloudinit/util.py | 3 +-- 8 files changed, 15 insertions(+), 29 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index d2f1b778..fcddd75c 100644 --- a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -215,12 +215,10 @@ def main_init(name, args): if args.local: deps = [sources.DEP_FILESYSTEM] - early_logs = [] - early_logs.append( - attempt_cmdline_url( - path=os.path.join("%s.d" % CLOUD_CONFIG, - "91_kernel_cmdline_url.cfg"), - network=not args.local)) + early_logs = [attempt_cmdline_url( + path=os.path.join("%s.d" % CLOUD_CONFIG, + "91_kernel_cmdline_url.cfg"), + network=not args.local)] # Cloud-init 'init' stage is broken 
up into the following sub-stages # 1. Ensure that the init object fetches its config without errors diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py index efedd4ae..aff4010e 100644 --- a/cloudinit/config/cc_keys_to_console.py +++ b/cloudinit/config/cc_keys_to_console.py @@ -63,9 +63,7 @@ def handle(name, cfg, cloud, log, _args): ["ssh-dss"]) try: - cmd = [helper_path] - cmd.append(','.join(fp_blacklist)) - cmd.append(','.join(key_blacklist)) + cmd = [helper_path, ','.join(fp_blacklist), ','.join(key_blacklist)] (stdout, _stderr) = util.subp(cmd) util.multi_log("%s\n" % (stdout.strip()), stderr=False, console=True) diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py index 35d8c57f..98b0e665 100755 --- a/cloudinit/config/cc_ssh_authkey_fingerprints.py +++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py @@ -77,11 +77,10 @@ def _pprint_key_entries(user, key_fn, key_entries, hash_meth='md5', tbl = SimpleTable(tbl_fields) for entry in key_entries: if _is_printable_key(entry): - row = [] - row.append(entry.keytype or '-') - row.append(_gen_fingerprint(entry.base64, hash_meth) or '-') - row.append(entry.options or '-') - row.append(entry.comment or '-') + row = [entry.keytype or '-', + _gen_fingerprint(entry.base64, hash_meth) or '-', + entry.options or '-', + entry.comment or '-'] tbl.add_row(row) authtbl_s = tbl.get_string() authtbl_lines = authtbl_s.splitlines() diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py index f87a3432..b814c8ba 100644 --- a/cloudinit/distros/arch.py +++ b/cloudinit/distros/arch.py @@ -129,11 +129,8 @@ class Distro(distros.Distro): if pkgs is None: pkgs = [] - cmd = ['pacman'] + cmd = ['pacman', "-Sy", "--quiet", "--noconfirm"] # Redirect output - cmd.append("-Sy") - cmd.append("--quiet") - cmd.append("--noconfirm") if args and isinstance(args, str): cmd.append(args) diff --git a/cloudinit/distros/opensuse.py 
b/cloudinit/distros/opensuse.py index a219e9fb..162dfa05 100644 --- a/cloudinit/distros/opensuse.py +++ b/cloudinit/distros/opensuse.py @@ -67,11 +67,10 @@ class Distro(distros.Distro): if pkgs is None: pkgs = [] - cmd = ['zypper'] # No user interaction possible, enable non-interactive mode - cmd.append('--non-interactive') + cmd = ['zypper', '--non-interactive'] - # Comand is the operation, such as install + # Command is the operation, such as install if command == 'upgrade': command = 'update' cmd.append(command) diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index ce47b6bd..9450835e 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -173,10 +173,7 @@ class OpenNebulaNetwork(object): def gen_conf(self): global_dns = self.context.get('DNS', "").split() - conf = [] - conf.append('auto lo') - conf.append('iface lo inet loopback') - conf.append('') + conf = ['auto lo', 'iface lo inet loopback', ''] for mac, dev in self.ifaces.items(): mac = mac.lower() diff --git a/cloudinit/stages.py b/cloudinit/stages.py index d0452688..bc4ebc85 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -132,8 +132,7 @@ class Init(object): return initial_dirs def purge_cache(self, rm_instance_lnk=False): - rm_list = [] - rm_list.append(self.paths.boot_finished) + rm_list = [self.paths.boot_finished] if rm_instance_lnk: rm_list.append(self.paths.instance_link) for f in rm_list: diff --git a/cloudinit/util.py b/cloudinit/util.py index 02dc2ce8..b03b80c3 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -716,8 +716,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None): def make_url(scheme, host, port=None, path='', params='', query='', fragment=''): - pieces = [] - pieces.append(scheme or '') + pieces = [scheme or ''] netloc = '' if host: -- cgit v1.2.3 From f891df345afa57c0c7734e8f04cca9a3d5881778 Mon Sep 17 00:00:00 2001 From: Douglas Jordan Date: Sat, 10 
Mar 2018 07:20:08 +0100 Subject: This commit fixes get_hostname on the AzureDataSource. LP: #1754495 --- cloudinit/sources/DataSourceAzure.py | 2 ++ tests/unittests/test_datasource/test_azure.py | 15 +++++++++++++++ 2 files changed, 17 insertions(+) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 4bcbf3a4..0bb7fad9 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -223,6 +223,8 @@ DEF_PASSWD_REDACTION = 'REDACTED' def get_hostname(hostname_command='hostname'): + if not isinstance(hostname_command, (list, tuple)): + hostname_command = (hostname_command,) return util.subp(hostname_command, capture=True)[0].strip() diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index 254e9876..da7da0ca 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -643,6 +643,21 @@ fdescfs /dev/fd fdescfs rw 0 0 expected_config['config'].append(blacklist_config) self.assertEqual(netconfig, expected_config) + @mock.patch("cloudinit.sources.DataSourceAzure.util.subp") + def test_get_hostname_with_no_args(self, subp): + dsaz.get_hostname() + subp.assert_called_once_with(("hostname",), capture=True) + + @mock.patch("cloudinit.sources.DataSourceAzure.util.subp") + def test_get_hostname_with_string_arg(self, subp): + dsaz.get_hostname(hostname_command="hostname") + subp.assert_called_once_with(("hostname",), capture=True) + + @mock.patch("cloudinit.sources.DataSourceAzure.util.subp") + def test_get_hostname_with_iterable_arg(self, subp): + dsaz.get_hostname(hostname_command=("hostname",)) + subp.assert_called_once_with(("hostname",), capture=True) + class TestAzureBounce(CiTestCase): -- cgit v1.2.3 From 133ad2cb327ad17b7b81319fac8f9f14577c04df Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Wed, 14 Mar 2018 23:38:07 -0600 Subject: set_hostname: When present 
in metadata, set it before network bringup. When instance meta-data provides hostname information, run cc_set_hostname in the init-local or init-net stage before network comes up. Prevent an initial DHCP request which leaks the stock cloud-image default hostname before the meta-data provided hostname was processed. A leaked cloud-image hostname adversely affects Dynamic DNS which would reallocate 'ubuntu' hostname in DNS to every instance brought up by cloud-init. These instances would only update DNS to the cloud-init configured hostname upon DHCP lease renewal. This branch extends the get_hostname methods in datasource, cloud and util to limit results to metadata_only to avoid extra cost of querying the distro for hostname information if metadata does not provide that information. LP: #1746455 --- cloudinit/cloud.py | 5 +- cloudinit/cmd/main.py | 25 ++++ cloudinit/cmd/tests/test_main.py | 161 +++++++++++++++++++++ cloudinit/config/cc_set_hostname.py | 41 +++++- cloudinit/sources/__init__.py | 21 ++- cloudinit/sources/tests/test_init.py | 70 ++++++++- cloudinit/tests/test_util.py | 74 ++++++++++ cloudinit/util.py | 17 ++- .../test_handler/test_handler_set_hostname.py | 57 +++++++- 9 files changed, 449 insertions(+), 22 deletions(-) create mode 100644 cloudinit/cmd/tests/test_main.py (limited to 'cloudinit/sources') diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py index ba616781..6d12c437 100644 --- a/cloudinit/cloud.py +++ b/cloudinit/cloud.py @@ -78,8 +78,9 @@ class Cloud(object): def get_locale(self): return self.datasource.get_locale() - def get_hostname(self, fqdn=False): - return self.datasource.get_hostname(fqdn=fqdn) + def get_hostname(self, fqdn=False, metadata_only=False): + return self.datasource.get_hostname( + fqdn=fqdn, metadata_only=metadata_only) def device_name_to_device(self, name): return self.datasource.device_name_to_device(name) diff --git a/cloudinit/cmd/main.py b/cloudinit/cmd/main.py index fcddd75c..3f2dbb93 100644 --- 
a/cloudinit/cmd/main.py +++ b/cloudinit/cmd/main.py @@ -40,6 +40,7 @@ from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE, from cloudinit import atomic_helper +from cloudinit.config import cc_set_hostname from cloudinit.dhclient_hook import LogDhclient @@ -352,6 +353,11 @@ def main_init(name, args): LOG.debug("[%s] %s will now be targeting instance id: %s. new=%s", mode, name, iid, init.is_new_instance()) + if mode == sources.DSMODE_LOCAL: + # Before network comes up, set any configured hostname to allow + # dhcp clients to advertize this hostname to any DDNS services + # LP: #1746455. + _maybe_set_hostname(init, stage='local', retry_stage='network') init.apply_network_config(bring_up=bool(mode != sources.DSMODE_LOCAL)) if mode == sources.DSMODE_LOCAL: @@ -368,6 +374,7 @@ def main_init(name, args): init.setup_datasource() # update fully realizes user-data (pulling in #include if necessary) init.update() + _maybe_set_hostname(init, stage='init-net', retry_stage='modules:config') # Stage 7 try: # Attempt to consume the data per instance. @@ -681,6 +688,24 @@ def status_wrapper(name, args, data_d=None, link_d=None): return len(v1[mode]['errors']) +def _maybe_set_hostname(init, stage, retry_stage): + """Call set-hostname if metadata, vendordata or userdata provides it. + + @param stage: String representing current stage in which we are running. + @param retry_stage: String represented logs upon error setting hostname. + """ + cloud = init.cloudify() + (hostname, _fqdn) = util.get_hostname_fqdn( + init.cfg, cloud, metadata_only=True) + if hostname: # meta-data or user-data hostname content + try: + cc_set_hostname.handle('set-hostname', init.cfg, cloud, LOG, None) + except cc_set_hostname.SetHostnameError as e: + LOG.debug( + 'Failed setting hostname in %s stage. Will' + ' retry in %s stage. 
Error: %s.', stage, retry_stage, str(e)) + + def main_features(name, args): sys.stdout.write('\n'.join(sorted(version.FEATURES)) + '\n') diff --git a/cloudinit/cmd/tests/test_main.py b/cloudinit/cmd/tests/test_main.py new file mode 100644 index 00000000..dbe421c0 --- /dev/null +++ b/cloudinit/cmd/tests/test_main.py @@ -0,0 +1,161 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from collections import namedtuple +import copy +import os +from six import StringIO + +from cloudinit.cmd import main +from cloudinit.util import ( + ensure_dir, load_file, write_file, yaml_dumps) +from cloudinit.tests.helpers import ( + FilesystemMockingTestCase, wrap_and_call) + +mypaths = namedtuple('MyPaths', 'run_dir') +myargs = namedtuple('MyArgs', 'debug files force local reporter subcommand') + + +class TestMain(FilesystemMockingTestCase): + + with_logs = True + + def setUp(self): + super(TestMain, self).setUp() + self.new_root = self.tmp_dir() + self.cloud_dir = self.tmp_path('var/lib/cloud/', dir=self.new_root) + os.makedirs(self.cloud_dir) + self.replicateTestRoot('simple_ubuntu', self.new_root) + self.cfg = { + 'datasource_list': ['None'], + 'runcmd': ['ls /etc'], # test ALL_DISTROS + 'system_info': {'paths': {'cloud_dir': self.cloud_dir, + 'run_dir': self.new_root}}, + 'write_files': [ + { + 'path': '/etc/blah.ini', + 'content': 'blah', + 'permissions': 0o755, + }, + ], + 'cloud_init_modules': ['write-files', 'runcmd'], + } + cloud_cfg = yaml_dumps(self.cfg) + ensure_dir(os.path.join(self.new_root, 'etc', 'cloud')) + self.cloud_cfg_file = os.path.join( + self.new_root, 'etc', 'cloud', 'cloud.cfg') + write_file(self.cloud_cfg_file, cloud_cfg) + self.patchOS(self.new_root) + self.patchUtils(self.new_root) + self.stderr = StringIO() + self.patchStdoutAndStderr(stderr=self.stderr) + + def test_main_init_run_net_stops_on_file_no_net(self): + """When no-net file is present, main_init does not process modules.""" + stop_file = 
os.path.join(self.cloud_dir, 'data', 'no-net') # stop file + write_file(stop_file, '') + cmdargs = myargs( + debug=False, files=None, force=False, local=False, reporter=None, + subcommand='init') + (item1, item2) = wrap_and_call( + 'cloudinit.cmd.main', + {'util.close_stdin': True, + 'netinfo.debug_info': 'my net debug info', + 'util.fixup_output': ('outfmt', 'errfmt')}, + main.main_init, 'init', cmdargs) + # We should not run write_files module + self.assertFalse( + os.path.exists(os.path.join(self.new_root, 'etc/blah.ini')), + 'Unexpected run of write_files module produced blah.ini') + self.assertEqual([], item2) + # Instancify is called + instance_id_path = 'var/lib/cloud/data/instance-id' + self.assertFalse( + os.path.exists(os.path.join(self.new_root, instance_id_path)), + 'Unexpected call to datasource.instancify produced instance-id') + expected_logs = [ + "Exiting. stop file ['{stop_file}'] existed\n".format( + stop_file=stop_file), + 'my net debug info' # netinfo.debug_info + ] + for log in expected_logs: + self.assertIn(log, self.stderr.getvalue()) + + def test_main_init_run_net_runs_modules(self): + """Modules like write_files are run in 'net' mode.""" + cmdargs = myargs( + debug=False, files=None, force=False, local=False, reporter=None, + subcommand='init') + (item1, item2) = wrap_and_call( + 'cloudinit.cmd.main', + {'util.close_stdin': True, + 'netinfo.debug_info': 'my net debug info', + 'util.fixup_output': ('outfmt', 'errfmt')}, + main.main_init, 'init', cmdargs) + self.assertEqual([], item2) + # Instancify is called + instance_id_path = 'var/lib/cloud/data/instance-id' + self.assertEqual( + 'iid-datasource-none\n', + os.path.join(load_file( + os.path.join(self.new_root, instance_id_path)))) + # modules are run (including write_files) + self.assertEqual( + 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) + expected_logs = [ + 'network config is disabled by fallback', # apply_network_config + 'my net debug info', # netinfo.debug_info + 
'no previous run detected' + ] + for log in expected_logs: + self.assertIn(log, self.stderr.getvalue()) + + def test_main_init_run_net_calls_set_hostname_when_metadata_present(self): + """When local-hostname metadata is present, call cc_set_hostname.""" + self.cfg['datasource'] = { + 'None': {'metadata': {'local-hostname': 'md-hostname'}}} + cloud_cfg = yaml_dumps(self.cfg) + write_file(self.cloud_cfg_file, cloud_cfg) + cmdargs = myargs( + debug=False, files=None, force=False, local=False, reporter=None, + subcommand='init') + + def set_hostname(name, cfg, cloud, log, args): + self.assertEqual('set-hostname', name) + updated_cfg = copy.deepcopy(self.cfg) + updated_cfg.update( + {'def_log_file': '/var/log/cloud-init.log', + 'log_cfgs': [], + 'syslog_fix_perms': ['syslog:adm', 'root:adm', 'root:wheel'], + 'vendor_data': {'enabled': True, 'prefix': []}}) + updated_cfg.pop('system_info') + + self.assertEqual(updated_cfg, cfg) + self.assertEqual(main.LOG, log) + self.assertIsNone(args) + + (item1, item2) = wrap_and_call( + 'cloudinit.cmd.main', + {'util.close_stdin': True, + 'netinfo.debug_info': 'my net debug info', + 'cc_set_hostname.handle': {'side_effect': set_hostname}, + 'util.fixup_output': ('outfmt', 'errfmt')}, + main.main_init, 'init', cmdargs) + self.assertEqual([], item2) + # Instancify is called + instance_id_path = 'var/lib/cloud/data/instance-id' + self.assertEqual( + 'iid-datasource-none\n', + os.path.join(load_file( + os.path.join(self.new_root, instance_id_path)))) + # modules are run (including write_files) + self.assertEqual( + 'blah', load_file(os.path.join(self.new_root, 'etc/blah.ini'))) + expected_logs = [ + 'network config is disabled by fallback', # apply_network_config + 'my net debug info', # netinfo.debug_info + 'no previous run detected' + ] + for log in expected_logs: + self.assertIn(log, self.stderr.getvalue()) + +# vi: ts=4 expandtab diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py index 
aa3dfe5f..3d2b2da3 100644 --- a/cloudinit/config/cc_set_hostname.py +++ b/cloudinit/config/cc_set_hostname.py @@ -32,22 +32,51 @@ will be used. hostname: """ +import os + + +from cloudinit.atomic_helper import write_json from cloudinit import util +class SetHostnameError(Exception): + """Raised when the distro runs into an exception when setting hostname. + + This may happen if we attempt to set the hostname early in cloud-init's + init-local timeframe as certain services may not be running yet. + """ + pass + + def handle(name, cfg, cloud, log, _args): if util.get_cfg_option_bool(cfg, "preserve_hostname", False): log.debug(("Configuration option 'preserve_hostname' is set," " not setting the hostname in module %s"), name) return - (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud) + # Check for previous successful invocation of set-hostname + + # set-hostname artifact file accounts for both hostname and fqdn + # deltas. As such, it's format is different than cc_update_hostname's + # previous-hostname file which only contains the base hostname. + # TODO consolidate previous-hostname and set-hostname artifact files and + # distro._read_hostname implementation so we only validate one artifact. + prev_fn = os.path.join(cloud.get_cpath('data'), "set-hostname") + prev_hostname = {} + if os.path.exists(prev_fn): + prev_hostname = util.load_json(util.load_file(prev_fn)) + hostname_changed = (hostname != prev_hostname.get('hostname') or + fqdn != prev_hostname.get('fqdn')) + if not hostname_changed: + log.debug('No hostname changes. 
Skipping set-hostname') + return + log.debug("Setting the hostname to %s (%s)", fqdn, hostname) try: - log.debug("Setting the hostname to %s (%s)", fqdn, hostname) cloud.distro.set_hostname(hostname, fqdn) - except Exception: - util.logexc(log, "Failed to set the hostname to %s (%s)", fqdn, - hostname) - raise + except Exception as e: + msg = "Failed to set the hostname to %s (%s)" % (fqdn, hostname) + util.logexc(log, msg) + raise SetHostnameError("%s: %s" % (msg, e)) + write_json(prev_fn, {'hostname': hostname, 'fqdn': fqdn}) # vi: ts=4 expandtab diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py index a05ca2f6..df0b374a 100644 --- a/cloudinit/sources/__init__.py +++ b/cloudinit/sources/__init__.py @@ -276,21 +276,34 @@ class DataSource(object): return "iid-datasource" return str(self.metadata['instance-id']) - def get_hostname(self, fqdn=False, resolve_ip=False): + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): + """Get hostname or fqdn from the datasource. Look it up if desired. + + @param fqdn: Boolean, set True to return hostname with domain. + @param resolve_ip: Boolean, set True to attempt to resolve an ipv4 + address provided in local-hostname meta-data. + @param metadata_only: Boolean, set True to avoid looking up hostname + if meta-data doesn't have local-hostname present. + + @return: hostname or qualified hostname. Optionally return None when + metadata_only is True and local-hostname data is not available. + """ defdomain = "localdomain" defhost = "localhost" domain = defdomain if not self.metadata or 'local-hostname' not in self.metadata: + if metadata_only: + return None # this is somewhat questionable really. # the cloud datasource was asked for a hostname # and didn't have one. 
raising error might be more appropriate # but instead, basically look up the existing hostname toks = [] hostname = util.get_hostname() - fqdn = util.get_fqdn_from_hosts(hostname) - if fqdn and fqdn.find(".") > 0: - toks = str(fqdn).split(".") + hosts_fqdn = util.get_fqdn_from_hosts(hostname) + if hosts_fqdn and hosts_fqdn.find(".") > 0: + toks = str(hosts_fqdn).split(".") elif hostname and hostname.find(".") > 0: toks = str(hostname).split(".") elif hostname: diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index af151154..5065083c 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -7,7 +7,7 @@ import stat from cloudinit.helpers import Paths from cloudinit.sources import ( INSTANCE_JSON_FILE, DataSource) -from cloudinit.tests.helpers import CiTestCase, skipIf +from cloudinit.tests.helpers import CiTestCase, skipIf, mock from cloudinit.user_data import UserDataProcessor from cloudinit import util @@ -108,6 +108,74 @@ class TestDataSource(CiTestCase): self.assertEqual('userdata_raw', datasource.userdata_raw) self.assertEqual('vendordata_raw', datasource.vendordata_raw) + def test_get_hostname_strips_local_hostname_without_domain(self): + """Datasource.get_hostname strips metadata local-hostname of domain.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertTrue(datasource.get_data()) + self.assertEqual( + 'test-subclass-hostname', datasource.metadata['local-hostname']) + self.assertEqual('test-subclass-hostname', datasource.get_hostname()) + datasource.metadata['local-hostname'] = 'hostname.my.domain.com' + self.assertEqual('hostname', datasource.get_hostname()) + + def test_get_hostname_with_fqdn_returns_local_hostname_with_domain(self): + """Datasource.get_hostname with fqdn set gets qualified hostname.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, 
Paths({'run_dir': tmp})) + self.assertTrue(datasource.get_data()) + datasource.metadata['local-hostname'] = 'hostname.my.domain.com' + self.assertEqual( + 'hostname.my.domain.com', datasource.get_hostname(fqdn=True)) + + def test_get_hostname_without_metadata_uses_system_hostname(self): + """Datasource.gethostname runs util.get_hostname when no metadata.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertEqual({}, datasource.metadata) + mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' + with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: + with mock.patch(mock_fqdn) as m_fqdn: + m_gethost.return_value = 'systemhostname.domain.com' + m_fqdn.return_value = None # No maching fqdn in /etc/hosts + self.assertEqual('systemhostname', datasource.get_hostname()) + self.assertEqual( + 'systemhostname.domain.com', + datasource.get_hostname(fqdn=True)) + + def test_get_hostname_without_metadata_returns_none(self): + """Datasource.gethostname returns None when metadata_only and no MD.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertEqual({}, datasource.metadata) + mock_fqdn = 'cloudinit.sources.util.get_fqdn_from_hosts' + with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: + with mock.patch(mock_fqdn) as m_fqdn: + self.assertIsNone(datasource.get_hostname(metadata_only=True)) + self.assertIsNone( + datasource.get_hostname(fqdn=True, metadata_only=True)) + self.assertEqual([], m_gethost.call_args_list) + self.assertEqual([], m_fqdn.call_args_list) + + def test_get_hostname_without_metadata_prefers_etc_hosts(self): + """Datasource.gethostname prefers /etc/hosts to util.get_hostname.""" + tmp = self.tmp_dir() + datasource = DataSourceTestSubclassNet( + self.sys_cfg, self.distro, Paths({'run_dir': tmp})) + self.assertEqual({}, datasource.metadata) + mock_fqdn = 
'cloudinit.sources.util.get_fqdn_from_hosts' + with mock.patch('cloudinit.sources.util.get_hostname') as m_gethost: + with mock.patch(mock_fqdn) as m_fqdn: + m_gethost.return_value = 'systemhostname.domain.com' + m_fqdn.return_value = 'fqdnhostname.domain.com' + self.assertEqual('fqdnhostname', datasource.get_hostname()) + self.assertEqual('fqdnhostname.domain.com', + datasource.get_hostname(fqdn=True)) + def test_get_data_write_json_instance_data(self): """get_data writes INSTANCE_JSON_FILE to run_dir as readonly root.""" tmp = self.tmp_dir() diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index c3e2e404..d30643dc 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -16,6 +16,25 @@ MOUNT_INFO = [ ] +class FakeCloud(object): + + def __init__(self, hostname, fqdn): + self.hostname = hostname + self.fqdn = fqdn + self.calls = [] + + def get_hostname(self, fqdn=None, metadata_only=None): + myargs = {} + if fqdn is not None: + myargs['fqdn'] = fqdn + if metadata_only is not None: + myargs['metadata_only'] = metadata_only + self.calls.append(myargs) + if fqdn: + return self.fqdn + return self.hostname + + class TestUtil(CiTestCase): def test_parse_mount_info_no_opts_no_arg(self): @@ -67,3 +86,58 @@ class TestShellify(CiTestCase): "'echo' 'hi' 'sis'", ""]), util.shellify(["echo hi mom", ["echo", "hi dad"], ('echo', 'hi', 'sis')])) + + +class TestGetHostnameFqdn(CiTestCase): + + def test_get_hostname_fqdn_from_only_cfg_fqdn(self): + """When cfg only has the fqdn key, derive hostname and fqdn from it.""" + hostname, fqdn = util.get_hostname_fqdn( + cfg={'fqdn': 'myhost.domain.com'}, cloud=None) + self.assertEqual('myhost', hostname) + self.assertEqual('myhost.domain.com', fqdn) + + def test_get_hostname_fqdn_from_cfg_fqdn_and_hostname(self): + """When cfg has both fqdn and hostname keys, return them.""" + hostname, fqdn = util.get_hostname_fqdn( + cfg={'fqdn': 'myhost.domain.com', 'hostname': 'other'}, cloud=None) + 
self.assertEqual('other', hostname) + self.assertEqual('myhost.domain.com', fqdn) + + def test_get_hostname_fqdn_from_cfg_hostname_with_domain(self): + """When cfg has only hostname key which represents a fqdn, use that.""" + hostname, fqdn = util.get_hostname_fqdn( + cfg={'hostname': 'myhost.domain.com'}, cloud=None) + self.assertEqual('myhost', hostname) + self.assertEqual('myhost.domain.com', fqdn) + + def test_get_hostname_fqdn_from_cfg_hostname_without_domain(self): + """When cfg has a hostname without a '.' query cloud.get_hostname.""" + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') + hostname, fqdn = util.get_hostname_fqdn( + cfg={'hostname': 'myhost'}, cloud=mycloud) + self.assertEqual('myhost', hostname) + self.assertEqual('cloudhost.mycloud.com', fqdn) + self.assertEqual( + [{'fqdn': True, 'metadata_only': False}], mycloud.calls) + + def test_get_hostname_fqdn_from_without_fqdn_or_hostname(self): + """When cfg has neither hostname nor fqdn cloud.get_hostname.""" + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') + hostname, fqdn = util.get_hostname_fqdn(cfg={}, cloud=mycloud) + self.assertEqual('cloudhost', hostname) + self.assertEqual('cloudhost.mycloud.com', fqdn) + self.assertEqual( + [{'fqdn': True, 'metadata_only': False}, + {'metadata_only': False}], mycloud.calls) + + def test_get_hostname_fqdn_from_passes_metadata_only_to_cloud(self): + """Calls to cloud.get_hostname pass the metadata_only parameter.""" + mycloud = FakeCloud('cloudhost', 'cloudhost.mycloud.com') + hostname, fqdn = util.get_hostname_fqdn( + cfg={}, cloud=mycloud, metadata_only=True) + self.assertEqual( + [{'fqdn': True, 'metadata_only': True}, + {'metadata_only': True}], mycloud.calls) + +# vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index 083a8efe..4504f053 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1025,9 +1025,16 @@ def dos2unix(contents): return contents.replace('\r\n', '\n') -def get_hostname_fqdn(cfg, cloud): - # 
return the hostname and fqdn from 'cfg'. If not found in cfg, - then fall back to data from cloud +def get_hostname_fqdn(cfg, cloud, metadata_only=False): + """Get hostname and fqdn from config if present and fallback to cloud. + + @param cfg: Dictionary of merged user-data configuration (from init.cfg). + @param cloud: Cloud instance from init.cloudify(). + @param metadata_only: Boolean, set True to only query cloud meta-data, + returning None if not present in meta-data. + @return: a Tuple of strings <hostname>, <fqdn>. Values can be None when + metadata_only is True and no cfg or metadata provides hostname info. + """ if "fqdn" in cfg: # user specified a fqdn. Default hostname then is based off that fqdn = cfg['fqdn'] @@ -1041,11 +1048,11 @@ def get_hostname_fqdn(cfg, cloud): else: # no fqdn set, get fqdn from cloud. # get hostname from cfg if available otherwise cloud - fqdn = cloud.get_hostname(fqdn=True) + fqdn = cloud.get_hostname(fqdn=True, metadata_only=metadata_only) if "hostname" in cfg: hostname = cfg['hostname'] else: - hostname = cloud.get_hostname() + hostname = cloud.get_hostname(metadata_only=metadata_only) return (hostname, fqdn) diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py index abdc17e7..d09ec23a 100644 --- a/tests/unittests/test_handler/test_handler_set_hostname.py +++ b/tests/unittests/test_handler/test_handler_set_hostname.py @@ -11,6 +11,7 @@ from cloudinit.tests import helpers as t_help from configobj import ConfigObj import logging +import os import shutil from six import BytesIO import tempfile @@ -19,14 +20,18 @@ LOG = logging.getLogger(__name__) class TestHostname(t_help.FilesystemMockingTestCase): + + with_logs = True + def setUp(self): super(TestHostname, self).setUp() self.tmp = tempfile.mkdtemp() + util.ensure_dir(os.path.join(self.tmp, 'data')) self.addCleanup(shutil.rmtree, self.tmp) def _fetch_distro(self, kind): cls = distros.fetch(kind) - paths = 
helpers.Paths({}) + paths = helpers.Paths({'cloud_dir': self.tmp}) return cls(kind, {}, paths) def test_write_hostname_rhel(self): @@ -34,7 +39,7 @@ class TestHostname(t_help.FilesystemMockingTestCase): 'hostname': 'blah.blah.blah.yahoo.com', } distro = self._fetch_distro('rhel') - paths = helpers.Paths({}) + paths = helpers.Paths({'cloud_dir': self.tmp}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) @@ -51,7 +56,7 @@ class TestHostname(t_help.FilesystemMockingTestCase): 'hostname': 'blah.blah.blah.yahoo.com', } distro = self._fetch_distro('debian') - paths = helpers.Paths({}) + paths = helpers.Paths({'cloud_dir': self.tmp}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) @@ -65,7 +70,7 @@ class TestHostname(t_help.FilesystemMockingTestCase): 'hostname': 'blah.blah.blah.suse.com', } distro = self._fetch_distro('sles') - paths = helpers.Paths({}) + paths = helpers.Paths({'cloud_dir': self.tmp}) ds = None cc = cloud.Cloud(ds, paths, {}, distro, None) self.patchUtils(self.tmp) @@ -74,4 +79,48 @@ class TestHostname(t_help.FilesystemMockingTestCase): contents = util.load_file(distro.hostname_conf_fn) self.assertEqual('blah', contents.strip()) + def test_multiple_calls_skips_unchanged_hostname(self): + """Only new hostname or fqdn values will generate a hostname call.""" + distro = self._fetch_distro('debian') + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + cc_set_hostname.handle( + 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, []) + contents = util.load_file("/etc/hostname") + self.assertEqual('hostname1', contents.strip()) + cc_set_hostname.handle( + 'cc_set_hostname', {'hostname': 'hostname1.me.com'}, cc, LOG, []) + self.assertIn( + 'DEBUG: No hostname changes. 
Skipping set-hostname\n', + self.logs.getvalue()) + cc_set_hostname.handle( + 'cc_set_hostname', {'hostname': 'hostname2.me.com'}, cc, LOG, []) + contents = util.load_file("/etc/hostname") + self.assertEqual('hostname2', contents.strip()) + self.assertIn( + 'Non-persistently setting the system hostname to hostname2', + self.logs.getvalue()) + + def test_error_on_distro_set_hostname_errors(self): + """Raise SetHostnameError on exceptions from distro.set_hostname.""" + distro = self._fetch_distro('debian') + + def set_hostname_error(hostname, fqdn): + raise Exception("OOPS on: %s" % fqdn) + + distro.set_hostname = set_hostname_error + paths = helpers.Paths({'cloud_dir': self.tmp}) + ds = None + cc = cloud.Cloud(ds, paths, {}, distro, None) + self.patchUtils(self.tmp) + with self.assertRaises(cc_set_hostname.SetHostnameError) as ctx_mgr: + cc_set_hostname.handle( + 'somename', {'hostname': 'hostname1.me.com'}, cc, LOG, []) + self.assertEqual( + 'Failed to set the hostname to hostname1.me.com (hostname1):' + ' OOPS on: hostname1.me.com', + str(ctx_mgr.exception)) + # vi: ts=4 expandtab -- cgit v1.2.3 From e88e35483e373b39b4485f30f7a867f50571027c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Mon, 19 Mar 2018 14:50:54 -0400 Subject: Add Hetzner Cloud DataSource The Hetzner Cloud metadata service is an AWS-style service available over HTTP via the link local address 169.254.169.254. 
https://hetzner.com/cloud https://docs.hetzner.cloud/ --- cloudinit/apport.py | 6 +- cloudinit/settings.py | 1 + cloudinit/sources/DataSourceHetzner.py | 100 ++++++++++++++++++++++++ cloudinit/sources/helpers/hetzner.py | 26 ++++++ tests/unittests/test_datasource/test_common.py | 2 + tests/unittests/test_datasource/test_hetzner.py | 99 +++++++++++++++++++++++ tests/unittests/test_ds_identify.py | 9 +++ tools/ds-identify | 7 +- 8 files changed, 246 insertions(+), 4 deletions(-) create mode 100644 cloudinit/sources/DataSourceHetzner.py create mode 100644 cloudinit/sources/helpers/hetzner.py create mode 100644 tests/unittests/test_datasource/test_hetzner.py (limited to 'cloudinit/sources') diff --git a/cloudinit/apport.py b/cloudinit/apport.py index 221f341c..618b0160 100644 --- a/cloudinit/apport.py +++ b/cloudinit/apport.py @@ -14,9 +14,9 @@ except ImportError: KNOWN_CLOUD_NAMES = [ 'Amazon - Ec2', 'AliYun', 'AltCloud', 'Azure', 'Bigstep', 'CloudSigma', - 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', 'MAAS', - 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', 'Scaleway', 'SmartOS', - 'VMware', 'Other'] + 'CloudStack', 'DigitalOcean', 'GCE - Google Compute Engine', + 'Hetzner Cloud', 'MAAS', 'NoCloud', 'OpenNebula', 'OpenStack', 'OVF', + 'Scaleway', 'SmartOS', 'VMware', 'Other'] # Potentially clear text collected logs CLOUDINIT_LOG = '/var/log/cloud-init.log' diff --git a/cloudinit/settings.py b/cloudinit/settings.py index c120498f..5fe749d4 100644 --- a/cloudinit/settings.py +++ b/cloudinit/settings.py @@ -36,6 +36,7 @@ CFG_BUILTIN = { 'SmartOS', 'Bigstep', 'Scaleway', + 'Hetzner', # At the end to act as a 'catch' when none of the above work... 'None', ], diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py new file mode 100644 index 00000000..769fe131 --- /dev/null +++ b/cloudinit/sources/DataSourceHetzner.py @@ -0,0 +1,100 @@ +# Author: Jonas Keidel +# Author: Markus Schade +# +# This file is part of cloud-init. 
See LICENSE file for license information. +# +"""Hetzner Cloud API Documentation. + https://docs.hetzner.cloud/""" + +from cloudinit import log as logging +from cloudinit import net as cloudnet +from cloudinit import sources +from cloudinit import util + +import cloudinit.sources.helpers.hetzner as hc_helper + +LOG = logging.getLogger(__name__) + +BASE_URL_V1 = 'http://169.254.169.254/hetzner/v1' + +BUILTIN_DS_CONFIG = { + 'metadata_url': BASE_URL_V1 + '/metadata', + 'userdata_url': BASE_URL_V1 + '/userdata', +} + +MD_RETRIES = 60 +MD_TIMEOUT = 2 +MD_WAIT_RETRY = 2 + + +class DataSourceHetzner(sources.DataSource): + def __init__(self, sys_cfg, distro, paths): + sources.DataSource.__init__(self, sys_cfg, distro, paths) + self.distro = distro + self.metadata = dict() + self.ds_cfg = util.mergemanydict([ + util.get_cfg_by_path(sys_cfg, ["datasource", "Hetzner"], {}), + BUILTIN_DS_CONFIG]) + self.metadata_address = self.ds_cfg['metadata_url'] + self.userdata_address = self.ds_cfg['userdata_url'] + self.retries = self.ds_cfg.get('retries', MD_RETRIES) + self.timeout = self.ds_cfg.get('timeout', MD_TIMEOUT) + self.wait_retry = self.ds_cfg.get('wait_retry', MD_WAIT_RETRY) + self._network_config = None + self.dsmode = sources.DSMODE_NETWORK + + def get_data(self): + nic = cloudnet.find_fallback_nic() + with cloudnet.EphemeralIPv4Network(nic, "169.254.0.1", 16, + "169.254.255.255"): + md = hc_helper.read_metadata( + self.metadata_address, timeout=self.timeout, + sec_between=self.wait_retry, retries=self.retries) + ud = hc_helper.read_userdata( + self.userdata_address, timeout=self.timeout, + sec_between=self.wait_retry, retries=self.retries) + + self.userdata_raw = ud + self.metadata_full = md + + """hostname is name provided by user at launch. 
The API enforces + it is a valid hostname, but it is not guaranteed to be resolvable + in dns or fully qualified.""" + self.metadata['instance-id'] = md['instance-id'] + self.metadata['local-hostname'] = md['hostname'] + self.metadata['network-config'] = md.get('network-config', None) + self.metadata['public-keys'] = md.get('public-keys', None) + self.vendordata_raw = md.get("vendor_data", None) + + return True + + @property + def network_config(self): + """Configure the networking. This needs to be done each boot, since + the IP information may have changed due to snapshot and/or + migration. + """ + + if self._network_config: + return self._network_config + + _net_config = self.metadata['network-config'] + if not _net_config: + raise Exception("Unable to get meta-data from server....") + + self._network_config = _net_config + + return self._network_config + + +# Used to match classes to dependencies +datasources = [ + (DataSourceHetzner, (sources.DEP_FILESYSTEM, )), +] + + +# Return a list of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + +# vi: ts=4 expandtab diff --git a/cloudinit/sources/helpers/hetzner.py b/cloudinit/sources/helpers/hetzner.py new file mode 100644 index 00000000..2554530d --- /dev/null +++ b/cloudinit/sources/helpers/hetzner.py @@ -0,0 +1,26 @@ +# Author: Jonas Keidel +# Author: Markus Schade +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit import log as logging +from cloudinit import url_helper +from cloudinit import util + +LOG = logging.getLogger(__name__) + + +def read_metadata(url, timeout=2, sec_between=2, retries=30): + response = url_helper.readurl(url, timeout=timeout, + sec_between=sec_between, retries=retries) + if not response.ok(): + raise RuntimeError("unable to read metadata at %s" % url) + return util.load_yaml(response.contents.decode()) + + +def read_userdata(url, timeout=2, sec_between=2, retries=30): + response = url_helper.readurl(url, timeout=timeout, + sec_between=sec_between, retries=retries) + if not response.ok(): + raise RuntimeError("unable to read userdata at %s" % url) + return response.contents diff --git a/tests/unittests/test_datasource/test_common.py b/tests/unittests/test_datasource/test_common.py index 80b9c650..6d2dc5b5 100644 --- a/tests/unittests/test_datasource/test_common.py +++ b/tests/unittests/test_datasource/test_common.py @@ -14,6 +14,7 @@ from cloudinit.sources import ( DataSourceDigitalOcean as DigitalOcean, DataSourceEc2 as Ec2, DataSourceGCE as GCE, + DataSourceHetzner as Hetzner, DataSourceMAAS as MAAS, DataSourceNoCloud as NoCloud, DataSourceOpenNebula as OpenNebula, @@ -31,6 +32,7 @@ DEFAULT_LOCAL = [ CloudSigma.DataSourceCloudSigma, ConfigDrive.DataSourceConfigDrive, DigitalOcean.DataSourceDigitalOcean, + Hetzner.DataSourceHetzner, NoCloud.DataSourceNoCloud, OpenNebula.DataSourceOpenNebula, OVF.DataSourceOVF, diff --git a/tests/unittests/test_datasource/test_hetzner.py b/tests/unittests/test_datasource/test_hetzner.py new file mode 100644 index 00000000..f1d1525e --- /dev/null +++ b/tests/unittests/test_datasource/test_hetzner.py @@ -0,0 +1,99 @@ +# Copyright (C) 2018 Jonas Keidel +# +# Author: Jonas Keidel +# +# This file is part of cloud-init. See LICENSE file for license information. 
+ +from cloudinit.sources import DataSourceHetzner +from cloudinit import util, settings, helpers + +from cloudinit.tests.helpers import mock, CiTestCase + +METADATA = util.load_yaml(""" +hostname: cloudinit-test +instance-id: 123456 +local-ipv4: '' +network-config: + config: + - mac_address: 96:00:00:08:19:da + name: eth0 + subnets: + - dns_nameservers: + - 213.133.99.99 + - 213.133.100.100 + - 213.133.98.98 + ipv4: true + type: dhcp + type: physical + - name: eth0:0 + subnets: + - address: 2a01:4f8:beef:beef::1/64 + gateway: fe80::1 + ipv6: true + routes: + - gateway: fe80::1%eth0 + netmask: 0 + network: '::' + type: static + type: physical + version: 1 +network-sysconfig: "DEVICE='eth0'\nTYPE=Ethernet\nBOOTPROTO=dhcp\n\ + ONBOOT='yes'\nHWADDR=96:00:00:08:19:da\n\ + IPV6INIT=yes\nIPV6ADDR=2a01:4f8:beef:beef::1/64\n\ + IPV6_DEFAULTGW=fe80::1%eth0\nIPV6_AUTOCONF=no\n\ + DNS1=213.133.99.99\nDNS2=213.133.100.100\n" +public-ipv4: 192.168.0.1 +public-keys: +- ssh-ed25519 \ + AAAAC3Nzac1lZdI1NTE5AaaAIaFrcac0yVITsmRrmueq6MD0qYNKlEvW8O1Ib4nkhmWh \ + test-key@workstation +vendor_data: "test" +""") + +USERDATA = b"""#cloud-config +runcmd: +- [touch, /root/cloud-init-worked ] +""" + + +class TestDataSourceHetzner(CiTestCase): + """ + Test reading the meta-data + """ + def setUp(self): + super(TestDataSourceHetzner, self).setUp() + self.tmp = self.tmp_dir() + + def get_ds(self): + ds = DataSourceHetzner.DataSourceHetzner( + settings.CFG_BUILTIN, None, helpers.Paths({'run_dir': self.tmp})) + return ds + + @mock.patch('cloudinit.net.EphemeralIPv4Network') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') + @mock.patch('cloudinit.sources.helpers.hetzner.read_userdata') + def test_read_data(self, m_usermd, m_readmd, m_fallback_nic, m_net): + m_readmd.return_value = METADATA.copy() + m_usermd.return_value = USERDATA + m_fallback_nic.return_value = 'eth0' + + ds = self.get_ds() + ret = ds.get_data() + 
self.assertTrue(ret) + + m_net.assert_called_once_with( + 'eth0', '169.254.0.1', + 16, '169.254.255.255' + ) + + self.assertTrue(m_readmd.called) + + self.assertEqual(METADATA.get('hostname'), ds.get_hostname()) + + self.assertEqual(METADATA.get('public-keys'), + ds.get_public_ssh_keys()) + + self.assertIsInstance(ds.get_public_ssh_keys(), list) + self.assertEqual(ds.get_userdata_raw(), USERDATA) + self.assertEqual(ds.get_vendordata_raw(), METADATA.get('vendor_data')) diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 9be3f964..9c5628e7 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -60,6 +60,7 @@ P_CHASSIS_ASSET_TAG = "sys/class/dmi/id/chassis_asset_tag" P_PRODUCT_NAME = "sys/class/dmi/id/product_name" P_PRODUCT_SERIAL = "sys/class/dmi/id/product_serial" P_PRODUCT_UUID = "sys/class/dmi/id/product_uuid" +P_SYS_VENDOR = "sys/class/dmi/id/sys_vendor" P_SEED_DIR = "var/lib/cloud/seed" P_DSID_CFG = "etc/cloud/ds-identify.cfg" @@ -379,6 +380,10 @@ class TestDsIdentify(CiTestCase): """Nocloud seed directory ubuntu core writable""" self._test_ds_found('NoCloud-seed-ubuntu-core') + def test_hetzner_found(self): + """Hetzner cloud is identified in sys_vendor.""" + self._test_ds_found('Hetzner') + def blkid_out(disks=None): """Convert a list of disk dictionaries into blkid content.""" @@ -559,6 +564,10 @@ VALID_CFG = { }, ], }, + 'Hetzner': { + 'ds': 'Hetzner', + 'files': {P_SYS_VENDOR: 'Hetzner\n'}, + }, } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index ec368d58..e3f93c90 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -114,7 +114,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. 
DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway" +OVF SmartOS Scaleway Hetzner" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -979,6 +979,11 @@ dscheck_Scaleway() { return ${DS_NOT_FOUND} } +dscheck_Hetzner() { + dmi_sys_vendor_is Hetzner && return ${DS_FOUND} + return ${DS_NOT_FOUND} +} + collect_info() { read_virt read_pid1_product_name -- cgit v1.2.3 From 7deec7b6a1fce87dc2d9cd886053804bbc70380e Mon Sep 17 00:00:00 2001 From: Akihiko Ota Date: Tue, 20 Mar 2018 11:41:26 -0600 Subject: OpenNebula: Update network to return v2 config rather than ENI. OpenNebulaNetwork.gen_conf() was previously returning ENI format. This is updated to return netplan/v2 config. The changes here also adds support for IPv6 configuration distributed from OpenNebula and fixes some issues about nameserver information. --- cloudinit/net/network_state.py | 10 + cloudinit/sources/DataSourceOpenNebula.py | 104 +++++--- tests/unittests/test_datasource/test_opennebula.py | 266 ++++++++++++++------- 3 files changed, 261 insertions(+), 119 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/net/network_state.py b/cloudinit/net/network_state.py index 1dd7ded7..6d63e5c5 100644 --- a/cloudinit/net/network_state.py +++ b/cloudinit/net/network_state.py @@ -708,6 +708,7 @@ class NetworkStateInterpreter(object): gateway4 = None gateway6 = None + nameservers = {} for address in cfg.get('addresses', []): subnet = { 'type': 'static', @@ -723,6 +724,15 @@ class NetworkStateInterpreter(object): gateway4 = cfg.get('gateway4') subnet.update({'gateway': gateway4}) + if 'nameservers' in cfg and not nameservers: + addresses = cfg.get('nameservers').get('addresses') + if addresses: + nameservers['dns_nameservers'] = addresses + search = cfg.get('nameservers').get('search') + if search: + nameservers['dns_search'] = search + subnet.update(nameservers) + subnets.append(subnet) routes = [] 
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 9450835e..02cb98f7 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -20,7 +20,6 @@ import string from cloudinit import log as logging from cloudinit import net -from cloudinit.net import eni from cloudinit import sources from cloudinit import util @@ -91,15 +90,15 @@ class DataSourceOpenNebula(sources.DataSource): return False self.seed = seed - self.network_eni = results.get('network-interfaces') + self.network = results.get('network-interfaces') self.metadata = md self.userdata_raw = results.get('userdata') return True @property def network_config(self): - if self.network_eni is not None: - return eni.convert_eni_data(self.network_eni) + if self.network is not None: + return self.network else: return None @@ -143,18 +142,42 @@ class OpenNebulaNetwork(object): def mac2network(self, mac): return self.mac2ip(mac).rpartition(".")[0] + ".0" - def get_dns(self, dev): - return self.get_field(dev, "dns", "").split() + def get_nameservers(self, dev): + nameservers = {} + dns = self.get_field(dev, "dns", "").split() + dns.extend(self.context.get('DNS', "").split()) + if dns: + nameservers['addresses'] = dns + search_domain = self.get_field(dev, "search_domain", "").split() + if search_domain: + nameservers['search'] = search_domain + return nameservers - def get_domain(self, dev): - return self.get_field(dev, "domain") + def get_mtu(self, dev): + return self.get_field(dev, "mtu") def get_ip(self, dev, mac): return self.get_field(dev, "ip", self.mac2ip(mac)) + def get_ip6(self, dev): + addresses6 = [] + ip6 = self.get_field(dev, "ip6") + if ip6: + addresses6.append(ip6) + ip6_ula = self.get_field(dev, "ip6_ula") + if ip6_ula: + addresses6.append(ip6_ula) + return addresses6 + + def get_ip6_prefix(self, dev): + return self.get_field(dev, "ip6_prefix_length", "64") + def get_gateway(self, dev): return 
self.get_field(dev, "gateway") + def get_gateway6(self, dev): + return self.get_field(dev, "gateway6") + def get_mask(self, dev): return self.get_field(dev, "mask", "255.255.255.0") @@ -171,10 +194,11 @@ class OpenNebulaNetwork(object): return default if val in (None, "") else val def gen_conf(self): - global_dns = self.context.get('DNS', "").split() - - conf = ['auto lo', 'iface lo inet loopback', ''] + netconf = {} + netconf['version'] = 2 + netconf['ethernets'] = {} + ethernets = {} for mac, dev in self.ifaces.items(): mac = mac.lower() @@ -182,29 +206,49 @@ class OpenNebulaNetwork(object): # dev stores the current system name. c_dev = self.context_devname.get(mac, dev) - conf.append('auto ' + dev) - conf.append('iface ' + dev + ' inet static') - conf.append(' #hwaddress %s' % mac) - conf.append(' address ' + self.get_ip(c_dev, mac)) - conf.append(' network ' + self.get_network(c_dev, mac)) - conf.append(' netmask ' + self.get_mask(c_dev)) + devconf = {} + + # Set MAC address + devconf['match'] = {'macaddress': mac} + # Set IPv4 address + devconf['addresses'] = [] + mask = self.get_mask(c_dev) + prefix = str(net.mask_to_net_prefix(mask)) + devconf['addresses'].append( + self.get_ip(c_dev, mac) + '/' + prefix) + + # Set IPv6 Global and ULA address + addresses6 = self.get_ip6(c_dev) + if addresses6: + prefix6 = self.get_ip6_prefix(c_dev) + devconf['addresses'].extend( + [i + '/' + prefix6 for i in addresses6]) + + # Set IPv4 default gateway gateway = self.get_gateway(c_dev) if gateway: - conf.append(' gateway ' + gateway) + devconf['gateway4'] = gateway + + # Set IPv6 default gateway + gateway6 = self.get_gateway6(c_dev) + if gateway: + devconf['gateway6'] = gateway6 - domain = self.get_domain(c_dev) - if domain: - conf.append(' dns-search ' + domain) + # Set DNS servers and search domains + nameservers = self.get_nameservers(c_dev) + if nameservers: + devconf['nameservers'] = nameservers - # add global DNS servers to all interfaces - dns = self.get_dns(c_dev) - 
if global_dns or dns: - conf.append(' dns-nameservers ' + ' '.join(global_dns + dns)) + # Set MTU size + mtu = self.get_mtu(c_dev) + if mtu: + devconf['mtu'] = mtu - conf.append('') + ethernets[dev] = devconf - return "\n".join(conf) + netconf['ethernets'] = ethernets + return(netconf) def find_candidate_devs(): @@ -390,10 +434,10 @@ def read_context_disk_dir(source_dir, asuser=None): except TypeError: LOG.warning("Failed base64 decoding of userdata") - # generate static /etc/network/interfaces + # generate Network Configuration v2 # only if there are any required context variables - # http://opennebula.org/documentation:rel3.8:cong#network_configuration - ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP$', k)] + # http://docs.opennebula.org/5.4/operation/references/template.html#context-section + ipaddr_keys = [k for k in context if re.match(r'^ETH\d+_IP.*$', k)] if ipaddr_keys: onet = OpenNebulaNetwork(context) results['network-interfaces'] = onet.gen_conf() diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index 5c3ba012..ab42f344 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -4,7 +4,6 @@ from cloudinit import helpers from cloudinit.sources import DataSourceOpenNebula as ds from cloudinit import util from cloudinit.tests.helpers import mock, populate_dir, CiTestCase -from textwrap import dedent import os import pwd @@ -33,6 +32,11 @@ HOSTNAME = 'foo.example.com' PUBLIC_IP = '10.0.0.3' MACADDR = '02:00:0a:12:01:01' IP_BY_MACADDR = '10.18.1.1' +IP4_PREFIX = '24' +IP6_GLOBAL = '2001:db8:1:0:400:c0ff:fea8:1ba' +IP6_ULA = 'fd01:dead:beaf:0:400:c0ff:fea8:1ba' +IP6_GW = '2001:db8:1::ffff' +IP6_PREFIX = '48' DS_PATH = "cloudinit.sources.DataSourceOpenNebula" @@ -221,7 +225,9 @@ class TestOpenNebulaDataSource(CiTestCase): results = ds.read_context_disk_dir(self.seed_dir) self.assertTrue('network-interfaces' in results) 
- self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + self.assertTrue( + IP_BY_MACADDR + '/' + IP4_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) # ETH0_IP and ETH0_MAC populate_context_dir( @@ -229,7 +235,9 @@ class TestOpenNebulaDataSource(CiTestCase): results = ds.read_context_disk_dir(self.seed_dir) self.assertTrue('network-interfaces' in results) - self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + self.assertTrue( + IP_BY_MACADDR + '/' + IP4_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) # ETH0_IP with empty string and ETH0_MAC # in the case of using Virtual Network contains @@ -239,55 +247,91 @@ class TestOpenNebulaDataSource(CiTestCase): results = ds.read_context_disk_dir(self.seed_dir) self.assertTrue('network-interfaces' in results) - self.assertTrue(IP_BY_MACADDR in results['network-interfaces']) + self.assertTrue( + IP_BY_MACADDR + '/' + IP4_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) - # ETH0_NETWORK + # ETH0_MASK populate_context_dir( self.seed_dir, { 'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR, - 'ETH0_NETWORK': '10.18.0.0' + 'ETH0_MASK': '255.255.0.0' }) results = ds.read_context_disk_dir(self.seed_dir) self.assertTrue('network-interfaces' in results) - self.assertTrue('10.18.0.0' in results['network-interfaces']) + self.assertTrue( + IP_BY_MACADDR + '/16' in + results['network-interfaces']['ethernets'][dev]['addresses']) - # ETH0_NETWORK with empty string + # ETH0_MASK with empty string populate_context_dir( self.seed_dir, { 'ETH0_IP': IP_BY_MACADDR, 'ETH0_MAC': MACADDR, - 'ETH0_NETWORK': '' + 'ETH0_MASK': '' }) results = ds.read_context_disk_dir(self.seed_dir) self.assertTrue('network-interfaces' in results) - self.assertTrue('10.18.1.0' in results['network-interfaces']) + self.assertTrue( + IP_BY_MACADDR + '/' + IP4_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) - # ETH0_MASK + # ETH0_IP6 
populate_context_dir( self.seed_dir, { - 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_IP6': IP6_GLOBAL, 'ETH0_MAC': MACADDR, - 'ETH0_MASK': '255.255.0.0' }) results = ds.read_context_disk_dir(self.seed_dir) self.assertTrue('network-interfaces' in results) - self.assertTrue('255.255.0.0' in results['network-interfaces']) + self.assertTrue( + IP6_GLOBAL + '/64' in + results['network-interfaces']['ethernets'][dev]['addresses']) - # ETH0_MASK with empty string + # ETH0_IP6_ULA populate_context_dir( self.seed_dir, { - 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_IP6_ULA': IP6_ULA, + 'ETH0_MAC': MACADDR, + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP6_ULA + '/64' in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH + populate_context_dir( + self.seed_dir, { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, + 'ETH0_MAC': MACADDR, + }) + results = ds.read_context_disk_dir(self.seed_dir) + + self.assertTrue('network-interfaces' in results) + self.assertTrue( + IP6_GLOBAL + '/' + IP6_PREFIX in + results['network-interfaces']['ethernets'][dev]['addresses']) + + # ETH0_IP6 and ETH0_IP6_PREFIX_LENGTH with empty string + populate_context_dir( + self.seed_dir, { + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': '', 'ETH0_MAC': MACADDR, - 'ETH0_MASK': '' }) results = ds.read_context_disk_dir(self.seed_dir) self.assertTrue('network-interfaces' in results) - self.assertTrue('255.255.255.0' in results['network-interfaces']) + self.assertTrue( + IP6_GLOBAL + '/64' in + results['network-interfaces']['ethernets'][dev]['addresses']) def test_find_candidates(self): def my_devs_with(criteria): @@ -310,108 +354,152 @@ class TestOpenNebulaNetwork(unittest.TestCase): system_nics = ('eth0', 'ens3') - def test_lo(self): - net = ds.OpenNebulaNetwork(context={}, system_nics_by_mac={}) - self.assertEqual(net.gen_conf(), u'''\ -auto lo -iface lo inet loopback 
-''') - @mock.patch(DS_PATH + ".get_physical_nics_by_mac") def test_eth0(self, m_get_phys_by_mac): for nic in self.system_nics: m_get_phys_by_mac.return_value = {MACADDR: nic} net = ds.OpenNebulaNetwork({}) - self.assertEqual(net.gen_conf(), dedent("""\ - auto lo - iface lo inet loopback - - auto {dev} - iface {dev} inet static - #hwaddress {macaddr} - address 10.18.1.1 - network 10.18.1.0 - netmask 255.255.255.0 - """.format(dev=nic, macaddr=MACADDR))) + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/' + IP4_PREFIX]}}} + + self.assertEqual(net.gen_conf(), expected) def test_eth0_override(self): + self.maxDiff = None context = { 'DNS': '1.2.3.8', - 'ETH0_IP': '10.18.1.1', - 'ETH0_NETWORK': '10.18.0.0', - 'ETH0_MASK': '255.255.0.0', + 'ETH0_DNS': '1.2.3.6 1.2.3.7', 'ETH0_GATEWAY': '1.2.3.5', - 'ETH0_DOMAIN': 'example.com', + 'ETH0_GATEWAY6': '', + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_IP6': '', + 'ETH0_IP6_PREFIX_LENGTH': '', + 'ETH0_IP6_ULA': '', + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MASK': '255.255.0.0', + 'ETH0_MTU': '', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_SEARCH_DOMAIN': '', + } + for nic in self.system_nics: + net = ds.OpenNebulaNetwork(context, + system_nics_by_mac={MACADDR: nic}) + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [IP_BY_MACADDR + '/16'], + 'gateway4': '1.2.3.5', + 'gateway6': None, + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8']}}}} + + self.assertEqual(expected, net.gen_conf()) + + def test_eth0_v4v6_override(self): + self.maxDiff = None + context = { + 'DNS': '1.2.3.8', 'ETH0_DNS': '1.2.3.6 1.2.3.7', - 'ETH0_MAC': '02:00:0a:12:01:01' + 'ETH0_GATEWAY': '1.2.3.5', + 'ETH0_GATEWAY6': IP6_GW, + 'ETH0_IP': IP_BY_MACADDR, + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': IP6_PREFIX, + 'ETH0_IP6_ULA': IP6_ULA, + 'ETH0_MAC': '02:00:0a:12:01:01', + 'ETH0_MASK': '255.255.0.0', + 'ETH0_MTU': 
'1280', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_SEARCH_DOMAIN': 'example.com example.org', } for nic in self.system_nics: - expected = dedent("""\ - auto lo - iface lo inet loopback - - auto {dev} - iface {dev} inet static - #hwaddress {macaddr} - address 10.18.1.1 - network 10.18.0.0 - netmask 255.255.0.0 - gateway 1.2.3.5 - dns-search example.com - dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 - """).format(dev=nic, macaddr=MACADDR) net = ds.OpenNebulaNetwork(context, system_nics_by_mac={MACADDR: nic}) + + expected = { + 'version': 2, + 'ethernets': { + nic: { + 'match': {'macaddress': MACADDR}, + 'addresses': [ + IP_BY_MACADDR + '/16', + IP6_GLOBAL + '/' + IP6_PREFIX, + IP6_ULA + '/' + IP6_PREFIX], + 'gateway4': '1.2.3.5', + 'gateway6': IP6_GW, + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com', 'example.org']}, + 'mtu': '1280'}}} + self.assertEqual(expected, net.gen_conf()) def test_multiple_nics(self): """Test rendering multiple nics with names that differ from context.""" + self.maxDiff = None MAC_1 = "02:00:0a:12:01:01" MAC_2 = "02:00:0a:12:01:02" context = { 'DNS': '1.2.3.8', - 'ETH0_IP': '10.18.1.1', - 'ETH0_NETWORK': '10.18.0.0', - 'ETH0_MASK': '255.255.0.0', - 'ETH0_GATEWAY': '1.2.3.5', - 'ETH0_DOMAIN': 'example.com', 'ETH0_DNS': '1.2.3.6 1.2.3.7', + 'ETH0_GATEWAY': '1.2.3.5', + 'ETH0_GATEWAY6': IP6_GW, + 'ETH0_IP': '10.18.1.1', + 'ETH0_IP6': IP6_GLOBAL, + 'ETH0_IP6_PREFIX_LENGTH': '', + 'ETH0_IP6_ULA': IP6_ULA, 'ETH0_MAC': MAC_2, - 'ETH3_IP': '10.3.1.3', - 'ETH3_NETWORK': '10.3.0.0', - 'ETH3_MASK': '255.255.0.0', - 'ETH3_GATEWAY': '10.3.0.1', - 'ETH3_DOMAIN': 'third.example.com', + 'ETH0_MASK': '255.255.0.0', + 'ETH0_MTU': '1280', + 'ETH0_NETWORK': '10.18.0.0', + 'ETH0_SEARCH_DOMAIN': 'example.com', 'ETH3_DNS': '10.3.1.2', + 'ETH3_GATEWAY': '10.3.0.1', + 'ETH3_GATEWAY6': '', + 'ETH3_IP': '10.3.1.3', + 'ETH3_IP6': '', + 'ETH3_IP6_PREFIX_LENGTH': '', + 'ETH3_IP6_ULA': '', 'ETH3_MAC': MAC_1, + 'ETH3_MASK': 
'255.255.0.0', + 'ETH3_MTU': '', + 'ETH3_NETWORK': '10.3.0.0', + 'ETH3_SEARCH_DOMAIN': 'third.example.com third.example.org', } net = ds.OpenNebulaNetwork( context, system_nics_by_mac={MAC_1: 'enp0s25', MAC_2: 'enp1s2'}) - expected = dedent("""\ - auto lo - iface lo inet loopback - - auto enp0s25 - iface enp0s25 inet static - #hwaddress 02:00:0a:12:01:01 - address 10.3.1.3 - network 10.3.0.0 - netmask 255.255.0.0 - gateway 10.3.0.1 - dns-search third.example.com - dns-nameservers 1.2.3.8 10.3.1.2 - - auto enp1s2 - iface enp1s2 inet static - #hwaddress 02:00:0a:12:01:02 - address 10.18.1.1 - network 10.18.0.0 - netmask 255.255.0.0 - gateway 1.2.3.5 - dns-search example.com - dns-nameservers 1.2.3.8 1.2.3.6 1.2.3.7 - """) + expected = { + 'version': 2, + 'ethernets': { + 'enp1s2': { + 'match': {'macaddress': MAC_2}, + 'addresses': [ + '10.18.1.1/16', + IP6_GLOBAL + '/64', + IP6_ULA + '/64'], + 'gateway4': '1.2.3.5', + 'gateway6': IP6_GW, + 'nameservers': { + 'addresses': ['1.2.3.6', '1.2.3.7', '1.2.3.8'], + 'search': ['example.com']}, + 'mtu': '1280'}, + 'enp0s25': { + 'match': {'macaddress': MAC_1}, + 'addresses': ['10.3.1.3/16'], + 'gateway4': '10.3.0.1', + 'gateway6': None, + 'nameservers': { + 'addresses': ['10.3.1.2', '1.2.3.8'], + 'search': [ + 'third.example.com', + 'third.example.org']}}}} self.assertEqual(expected, net.gen_conf()) -- cgit v1.2.3 From 685f9901b820a457912959bdd4f389835e965524 Mon Sep 17 00:00:00 2001 From: Chad Smith Date: Tue, 20 Mar 2018 16:37:36 -0600 Subject: datasources: fix DataSource subclass get_hostname method signature DataSource.get_hostname call signature changed to allow for metadata_only parameter. The metadata_only=True parameter is passed to get_hostname during init-local stage in order to set the system hostname if present in metadata prior to initial network bring up. Fix subclasses of DataSource which have overridden get_hostname to allow for metadata_only param. 
LP: #1757176 --- cloudinit/sources/DataSourceAliYun.py | 2 +- cloudinit/sources/DataSourceCloudSigma.py | 2 +- cloudinit/sources/DataSourceGCE.py | 2 +- cloudinit/sources/DataSourceOpenNebula.py | 2 +- cloudinit/sources/DataSourceScaleway.py | 2 +- cloudinit/sources/tests/test_init.py | 28 ++++++++++++++++++++++++++++ 6 files changed, 33 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAliYun.py b/cloudinit/sources/DataSourceAliYun.py index 7ac8288d..22279d09 100644 --- a/cloudinit/sources/DataSourceAliYun.py +++ b/cloudinit/sources/DataSourceAliYun.py @@ -22,7 +22,7 @@ class DataSourceAliYun(EC2.DataSourceEc2): super(DataSourceAliYun, self).__init__(sys_cfg, distro, paths) self.seed_dir = os.path.join(paths.seed_dir, "AliYun") - def get_hostname(self, fqdn=False, _resolve_ip=False): + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): return self.metadata.get('hostname', 'localhost.localdomain') def get_public_ssh_keys(self): diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index 4eaad475..c816f349 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -84,7 +84,7 @@ class DataSourceCloudSigma(sources.DataSource): return True - def get_hostname(self, fqdn=False, resolve_ip=False): + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): """ Cleans up and uses the server's name if the latter is set. Otherwise the first part from uuid is being used. 
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py index bebc9918..d8162623 100644 --- a/cloudinit/sources/DataSourceGCE.py +++ b/cloudinit/sources/DataSourceGCE.py @@ -90,7 +90,7 @@ class DataSourceGCE(sources.DataSource): public_keys_data = self.metadata['public-keys-data'] return _parse_public_keys(public_keys_data, self.default_user) - def get_hostname(self, fqdn=False, resolve_ip=False): + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): # GCE has long FDQN's and has asked for short hostnames. return self.metadata['local-hostname'].split('.')[0] diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index 02cb98f7..d4a41116 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -102,7 +102,7 @@ class DataSourceOpenNebula(sources.DataSource): else: return None - def get_hostname(self, fqdn=False, resolve_ip=None): + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): if resolve_ip is None: if self.dsmode == sources.DSMODE_NETWORK: resolve_ip = True diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index b0b19c93..90056249 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -215,7 +215,7 @@ class DataSourceScaleway(sources.DataSource): def get_public_ssh_keys(self): return [key['key'] for key in self.metadata['ssh_public_keys']] - def get_hostname(self, fqdn=False, resolve_ip=False): + def get_hostname(self, fqdn=False, resolve_ip=False, metadata_only=False): return self.metadata['hostname'] @property diff --git a/cloudinit/sources/tests/test_init.py b/cloudinit/sources/tests/test_init.py index 5065083c..e7fda22a 100644 --- a/cloudinit/sources/tests/test_init.py +++ b/cloudinit/sources/tests/test_init.py @@ -1,10 +1,12 @@ # This file is part of cloud-init. 
See LICENSE file for license information. +import inspect import os import six import stat from cloudinit.helpers import Paths +from cloudinit import importer from cloudinit.sources import ( INSTANCE_JSON_FILE, DataSource) from cloudinit.tests.helpers import CiTestCase, skipIf, mock @@ -268,3 +270,29 @@ class TestDataSource(CiTestCase): "WARNING: Error persisting instance-data.json: 'utf8' codec can't" " decode byte 0xaa in position 2: invalid start byte", self.logs.getvalue()) + + def test_get_hostname_subclass_support(self): + """Validate get_hostname signature on all subclasses of DataSource.""" + # Use inspect.getfullargspec when we drop py2.6 and py2.7 + get_args = inspect.getargspec # pylint: disable=W1505 + base_args = get_args(DataSource.get_hostname) # pylint: disable=W1505 + # Import all DataSource subclasses so we can inspect them. + modules = util.find_modules(os.path.dirname(os.path.dirname(__file__))) + for loc, name in modules.items(): + mod_locs, _ = importer.find_module(name, ['cloudinit.sources'], []) + if mod_locs: + importer.import_module(mod_locs[0]) + for child in DataSource.__subclasses__(): + if 'Test' in child.dsname: + continue + self.assertEqual( + base_args, + get_args(child.get_hostname), # pylint: disable=W1505 + '%s does not implement DataSource.get_hostname params' + % child) + for grandchild in child.__subclasses__(): + self.assertEqual( + base_args, + get_args(grandchild.get_hostname), # pylint: disable=W1505 + '%s does not implement DataSource.get_hostname params' + % grandchild) -- cgit v1.2.3 From 097a2967abd6c78edfbdc035e7141f2d142f17ae Mon Sep 17 00:00:00 2001 From: Kurt Garloff Date: Fri, 23 Mar 2018 12:31:20 -0400 Subject: Revert the logic of exception_cb in read_url. 
In commit e9e8616, there was an inversion of the logic of the exception_cb return value meaning, breaking the (network) OpenStack DataSource, which implemented exception_cb as should_retry_cb, returning True when a retry should be done and False when the retry loop should be broken and the exception reraised again immediately. The OpenStack DS was the only user of this callback at the time and not touched by the commit (nor did the commit message mention an intended change), so this almost certainly happened by mistake. These days, we have a second user of the callback in DataSourceScaleway. It uses the new logic, so it needs change if we fix the meaning of the return value. This patch reverts the meaning of url_helper.read_url() exception_cb to the old semantics. It updates the comment and adjusts the Scaleway datasource. The patch has been tested on Open Telekom Cloud (which uses the OpenStack network Datasource) where previously a missing user_data and network_data.json would be retried 6 times each despite them not being present (they are optional!) and the server responding with a correct 404. After the patch, boot times are 10s faster, as we no longer pointlessly retry these files. LP: #1702160 LP: #1298921 --- cloudinit/sources/DataSourceScaleway.py | 6 +++--- cloudinit/url_helper.py | 7 ++++--- 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceScaleway.py b/cloudinit/sources/DataSourceScaleway.py index 90056249..e2502b02 100644 --- a/cloudinit/sources/DataSourceScaleway.py +++ b/cloudinit/sources/DataSourceScaleway.py @@ -113,9 +113,9 @@ def query_data_api_once(api_address, timeout, requests_session): retries=0, session=requests_session, # If the error is a HTTP/404 or a ConnectionError, go into raise - # block below. - exception_cb=lambda _, exc: exc.code == 404 or ( - isinstance(exc.cause, requests.exceptions.ConnectionError) + # block below and don't bother retrying. 
+ exception_cb=lambda _, exc: exc.code != 404 and ( + not isinstance(exc.cause, requests.exceptions.ConnectionError) ) ) return util.decode_binary(resp.contents) diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 4e814a5f..36289af5 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -258,9 +258,10 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # ssl exceptions are not going to get fixed by waiting a # few seconds break - if exception_cb and exception_cb(req_args.copy(), excps[-1]): - # if an exception callback was given it should return None - # a true-ish value means to break and re-raise the exception + if exception_cb and not exception_cb(req_args.copy(), excps[-1]): + # if an exception callback was given, it should return True + # to continue retrying and False to break and re-raise the + # exception break if i + 1 < manual_tries and sec_between > 0: LOG.debug("Please wait %s seconds while we wait to try again", -- cgit v1.2.3 From 2d618e27687470a8a3649f44598819bdee8cdb03 Mon Sep 17 00:00:00 2001 From: Douglas Jordan Date: Fri, 23 Mar 2018 12:04:18 -0600 Subject: Reduce AzurePreprovisioning HTTP timeouts. Reducing timeout to 1 second as IMDS responds within a handful of milliseconds. Also get rid of max_retries to prevent exiting out of polling loop early due to IMDS outage / upgrade. Reduce Azure PreProvisioning HTTP timeouts during polling to avoid waiting an extra minute. 
LP: #1752977 --- cloudinit/sources/DataSourceAzure.py | 31 ++++++++------------------- cloudinit/url_helper.py | 13 ++++++----- tests/unittests/test_datasource/test_azure.py | 22 ++++++------------- 3 files changed, 24 insertions(+), 42 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py index 0bb7fad9..0ee622e2 100644 --- a/cloudinit/sources/DataSourceAzure.py +++ b/cloudinit/sources/DataSourceAzure.py @@ -20,7 +20,7 @@ from cloudinit import net from cloudinit.net.dhcp import EphemeralDHCPv4 from cloudinit import sources from cloudinit.sources.helpers.azure import get_metadata_from_fabric -from cloudinit.url_helper import readurl, wait_for_url, UrlError +from cloudinit.url_helper import readurl, UrlError from cloudinit import util LOG = logging.getLogger(__name__) @@ -49,7 +49,6 @@ DEFAULT_FS = 'ext4' AZURE_CHASSIS_ASSET_TAG = '7783-7084-3265-9085-8269-3286-77' REPROVISION_MARKER_FILE = "/var/lib/cloud/data/poll_imds" IMDS_URL = "http://169.254.169.254/metadata/reprovisiondata" -IMDS_RETRIES = 5 def find_storvscid_from_sysctl_pnpinfo(sysctl_out, deviceid): @@ -451,36 +450,24 @@ class DataSourceAzure(sources.DataSource): headers = {"Metadata": "true"} LOG.debug("Start polling IMDS") - def sleep_cb(response, loop_n): - return 1 - - def exception_cb(msg, exception): + def exc_cb(msg, exception): if isinstance(exception, UrlError) and exception.code == 404: - return - LOG.warning("Exception during polling. Will try DHCP.", - exc_info=True) - + return True # If we get an exception while trying to call IMDS, we # call DHCP and setup the ephemeral network to acquire the new IP. 
- raise exception + return False need_report = report_ready - for i in range(IMDS_RETRIES): + while True: try: with EphemeralDHCPv4() as lease: if need_report: self._report_ready(lease=lease) need_report = False - wait_for_url([url], max_wait=None, timeout=60, - status_cb=LOG.info, - headers_cb=lambda url: headers, sleep_time=1, - exception_cb=exception_cb, - sleep_time_cb=sleep_cb) - return str(readurl(url, headers=headers)) - except Exception: - LOG.debug("Exception during polling-retrying dhcp" + - " %d more time(s).", (IMDS_RETRIES - i), - exc_info=True) + return readurl(url, timeout=1, headers=headers, + exception_cb=exc_cb, infinite=True).contents + except UrlError: + pass def _report_ready(self, lease): """Tells the fabric provisioning has completed diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py index 36289af5..03a573af 100644 --- a/cloudinit/url_helper.py +++ b/cloudinit/url_helper.py @@ -16,7 +16,7 @@ import time from email.utils import parsedate from functools import partial - +from itertools import count from requests import exceptions from six.moves.urllib.parse import ( @@ -172,7 +172,7 @@ def _get_ssl_args(url, ssl_details): def readurl(url, data=None, timeout=None, retries=0, sec_between=1, headers=None, headers_cb=None, ssl_details=None, check_status=True, allow_redirects=True, exception_cb=None, - session=None): + session=None, infinite=False): url = _cleanurl(url) req_args = { 'url': url, @@ -220,7 +220,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, excps = [] # Handle retrying ourselves since the built-in support # doesn't handle sleeping between tries... 
- for i in range(0, manual_tries): + # Infinitely retry if infinite is True + for i in count() if infinite else range(0, manual_tries): req_args['headers'] = headers_cb(url) filtered_req_args = {} for (k, v) in req_args.items(): @@ -229,7 +230,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, filtered_req_args[k] = v try: LOG.debug("[%s/%s] open '%s' with %s configuration", i, - manual_tries, url, filtered_req_args) + "infinite" if infinite else manual_tries, url, + filtered_req_args) if session is None: session = requests.Session() @@ -263,7 +265,8 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1, # to continue retrying and False to break and re-raise the # exception break - if i + 1 < manual_tries and sec_between > 0: + if (infinite and sec_between > 0) or \ + (i + 1 < manual_tries and sec_between > 0): LOG.debug("Please wait %s seconds while we wait to try again", sec_between) time.sleep(sec_between) diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py index da7da0ca..3e8b7913 100644 --- a/tests/unittests/test_datasource/test_azure.py +++ b/tests/unittests/test_datasource/test_azure.py @@ -1177,7 +1177,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): url = 'http://{0}/metadata/reprovisiondata?api-version=2017-04-02' host = "169.254.169.254" full_url = url.format(host) - fake_resp.return_value = mock.MagicMock(status_code=200, text="ovf") + fake_resp.return_value = mock.MagicMock(status_code=200, text="ovf", + content="ovf") dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) self.assertTrue(len(dsa._poll_imds()) > 0) self.assertEqual(fake_resp.call_args_list, @@ -1185,13 +1186,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs() - }, method='GET', timeout=60.0, - url=full_url), - mock.call(allow_redirects=True, - headers={'Metadata': 'true', - 'User-Agent': - 'Cloud-Init/%s' 
% vs() - }, method='GET', url=full_url)]) + }, method='GET', timeout=1, + url=full_url)]) self.assertEqual(m_dhcp.call_count, 1) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', @@ -1217,7 +1213,8 @@ class TestAzureDataSourcePreprovisioning(CiTestCase): username = "myuser" odata = {'HostName': hostname, 'UserName': username} content = construct_valid_ovf_env(data=odata) - fake_resp.return_value = mock.MagicMock(status_code=200, text=content) + fake_resp.return_value = mock.MagicMock(status_code=200, text=content, + content=content) dsa = dsaz.DataSourceAzure({}, distro=None, paths=self.paths) md, ud, cfg, d = dsa._reprovision() self.assertEqual(md['local-hostname'], hostname) @@ -1227,12 +1224,7 @@ headers={'Metadata': 'true', 'User-Agent': 'Cloud-Init/%s' % vs()}, - method='GET', timeout=60.0, url=full_url), - mock.call(allow_redirects=True, - headers={'Metadata': 'true', - 'User-Agent': - 'Cloud-Init/%s' % vs()}, - method='GET', url=full_url)]) + method='GET', timeout=1, url=full_url)]) self.assertEqual(m_dhcp.call_count, 1) m_net.assert_any_call( broadcast='192.168.2.255', interface='eth9', ip='192.168.2.9', -- cgit v1.2.3 From e0f644b7c8c76bd63d242558685722cc70d9c51d Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 23 Mar 2018 17:17:55 -0600 Subject: IBMCloud: Initial IBM Cloud datasource. This adds a specific IBM Cloud datasource. IBM Cloud is identified by: a.) running on xen b.) one of a LABEL=METADATA disk or a LABEL=config-2 disk with UUID=9796-932E The datasource contains its own config-drive reader that reads only the currently supported portion of config-drive needed for ibm cloud. During the provisioning boot, cloud-init is disabled. See the docstring in DataSourceIBMCloud.py for more information. 
--- cloudinit/sources/DataSourceConfigDrive.py | 10 + cloudinit/sources/DataSourceIBMCloud.py | 325 +++++++++++++++++++++++ cloudinit/tests/test_util.py | 72 +++++ cloudinit/util.py | 31 +++ tests/unittests/test_datasource/test_ibmcloud.py | 262 ++++++++++++++++++ tests/unittests/test_ds_identify.py | 104 +++++++- tools/ds-identify | 65 ++++- 7 files changed, 857 insertions(+), 12 deletions(-) create mode 100644 cloudinit/sources/DataSourceIBMCloud.py create mode 100644 tests/unittests/test_datasource/test_ibmcloud.py (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py index b8db6267..c7b5fe5f 100644 --- a/cloudinit/sources/DataSourceConfigDrive.py +++ b/cloudinit/sources/DataSourceConfigDrive.py @@ -14,6 +14,7 @@ from cloudinit import util from cloudinit.net import eni +from cloudinit.sources.DataSourceIBMCloud import get_ibm_platform from cloudinit.sources.helpers import openstack LOG = logging.getLogger(__name__) @@ -255,6 +256,15 @@ def find_candidate_devs(probe_optical=True): # an unpartitioned block device (ex sda, not sda1) devices = [d for d in candidates if d in by_label or not util.is_partition(d)] + + if devices: + # IBMCloud uses config-2 label, but limited to a single UUID. + ibm_platform, ibm_path = get_ibm_platform() + if ibm_path in devices: + devices.remove(ibm_path) + LOG.debug("IBMCloud device '%s' (%s) removed from candidate list", + ibm_path, ibm_platform) + return devices diff --git a/cloudinit/sources/DataSourceIBMCloud.py b/cloudinit/sources/DataSourceIBMCloud.py new file mode 100644 index 00000000..02b3d56f --- /dev/null +++ b/cloudinit/sources/DataSourceIBMCloud.py @@ -0,0 +1,325 @@ +# This file is part of cloud-init. See LICENSE file for license information. +"""Datasource for IBMCloud. + +IBMCloud is also known as SoftLayer or BlueMix. +IBMCloud hypervisor is xen (2018-03-10). + +There are 2 different api exposed launch methods. 
+ * template: This is the legacy method of launching instances. + When booting from an image template, the system boots first into + a "provisioning" mode. There, host <-> guest mechanisms are utilized + to execute code in the guest and provision it. + + Cloud-init will disable itself when it detects that it is in the + provisioning mode. It detects this by the presence of + a file '/root/provisioningConfiguration.cfg'. + + When provided with user-data, the "first boot" will contain a + ConfigDrive-like disk labeled with 'METADATA'. If there is no user-data + provided, then there is no data-source. + + Cloud-init never does any network configuration in this mode. + + * os_code: Essentially "launch by OS Code" (Operating System Code). + This is a more modern approach. There is no specific "provisioning" boot. + Instead, cloud-init does all the customization. With or without + user-data provided, an OpenStack ConfigDrive like disk is attached. + + Only disks with label 'config-2' and UUID '9796-932E' are considered. + This is to avoid this datasource claiming ConfigDrive. This does + mean that 1 in 8^16 (~4 billion) Xen ConfigDrive systems will be + incorrectly identified as IBMCloud. + +TODO: + * is uuid (/sys/hypervisor/uuid) stable for life of an instance? + it seems it is not the same as data's uuid in the os_code case + but is in the template case. + +""" +import base64 +import json +import os + +from cloudinit import log as logging +from cloudinit import sources +from cloudinit.sources.helpers import openstack +from cloudinit import util + +LOG = logging.getLogger(__name__) + +IBM_CONFIG_UUID = "9796-932E" + + +class Platforms(object): + TEMPLATE_LIVE_METADATA = "Template/Live/Metadata" + TEMPLATE_LIVE_NODATA = "UNABLE TO BE IDENTIFIED." 
+ TEMPLATE_PROVISIONING_METADATA = "Template/Provisioning/Metadata" + TEMPLATE_PROVISIONING_NODATA = "Template/Provisioning/No-Metadata" + OS_CODE = "OS-Code/Live" + + +PROVISIONING = ( + Platforms.TEMPLATE_PROVISIONING_METADATA, + Platforms.TEMPLATE_PROVISIONING_NODATA) + + +class DataSourceIBMCloud(sources.DataSource): + + dsname = 'IBMCloud' + system_uuid = None + + def __init__(self, sys_cfg, distro, paths): + super(DataSourceIBMCloud, self).__init__(sys_cfg, distro, paths) + self.source = None + self._network_config = None + self.network_json = None + self.platform = None + + def __str__(self): + root = super(DataSourceIBMCloud, self).__str__() + mstr = "%s [%s %s]" % (root, self.platform, self.source) + return mstr + + def _get_data(self): + results = read_md() + if results is None: + return False + + self.source = results['source'] + self.platform = results['platform'] + self.metadata = results['metadata'] + self.userdata_raw = results.get('userdata') + self.network_json = results.get('networkdata') + vd = results.get('vendordata') + self.vendordata_pure = vd + self.system_uuid = results['system-uuid'] + try: + self.vendordata_raw = sources.convert_vendordata(vd) + except ValueError as e: + LOG.warning("Invalid content in vendor-data: %s", e) + self.vendordata_raw = None + + return True + + def check_instance_id(self, sys_cfg): + """quickly (local check only) if self.instance_id is still valid + + in Template mode, the system uuid (/sys/hypervisor/uuid) is the + same as found in the METADATA disk. But that is not true in OS_CODE + mode. So we read the system_uuid and keep that for later compare.""" + if self.system_uuid is None: + return False + return self.system_uuid == _read_system_uuid() + + @property + def network_config(self): + if self.platform != Platforms.OS_CODE: + # If deployed from template, an agent in the provisioning + # environment handles networking configuration. Not cloud-init. 
+ return {'config': 'disabled', 'version': 1} + if self._network_config is None: + if self.network_json is not None: + LOG.debug("network config provided via network_json") + self._network_config = openstack.convert_net_json( + self.network_json, known_macs=None) + else: + LOG.debug("no network configuration available.") + return self._network_config + + +def _read_system_uuid(): + uuid_path = "/sys/hypervisor/uuid" + if not os.path.isfile(uuid_path): + return None + return util.load_file(uuid_path).strip().lower() + + +def _is_xen(): + return os.path.exists("/proc/xen") + + +def _is_ibm_provisioning(): + return os.path.exists("/root/provisioningConfiguration.cfg") + + +def get_ibm_platform(): + """Return a tuple (Platform, path) + + If this is Not IBM cloud, then the return value is (None, None). + An instance in provisioning mode is considered running on IBM cloud.""" + label_mdata = "METADATA" + label_cfg2 = "CONFIG-2" + not_found = (None, None) + + if not _is_xen(): + return not_found + + # fslabels contains only the first entry with a given label. + fslabels = {} + try: + devs = util.blkid() + except util.ProcessExecutionError as e: + LOG.warning("Failed to run blkid: %s", e) + return (None, None) + + for dev in sorted(devs.keys()): + data = devs[dev] + label = data.get("LABEL", "").upper() + uuid = data.get("UUID", "").upper() + if label not in (label_mdata, label_cfg2): + continue + if label in fslabels: + LOG.warning("Duplicate fslabel '%s'. 
existing=%s current=%s", + label, fslabels[label], data) + continue + if label == label_cfg2 and uuid != IBM_CONFIG_UUID: + LOG.debug("Skipping %s with LABEL=%s due to uuid != %s: %s", + dev, label, uuid, data) + continue + fslabels[label] = data + + metadata_path = fslabels.get(label_mdata, {}).get('DEVNAME') + cfg2_path = fslabels.get(label_cfg2, {}).get('DEVNAME') + + if cfg2_path: + return (Platforms.OS_CODE, cfg2_path) + elif metadata_path: + if _is_ibm_provisioning(): + return (Platforms.TEMPLATE_PROVISIONING_METADATA, metadata_path) + else: + return (Platforms.TEMPLATE_LIVE_METADATA, metadata_path) + elif _is_ibm_provisioning(): + return (Platforms.TEMPLATE_PROVISIONING_NODATA, None) + return not_found + + +def read_md(): + """Read data from IBM Cloud. + + @return: None if not running on IBM Cloud. + dictionary with guaranteed fields: metadata, version + and optional fields: userdata, vendordata, networkdata. + Also includes the system uuid from /sys/hypervisor/uuid.""" + platform, path = get_ibm_platform() + if platform is None: + LOG.debug("This is not an IBMCloud platform.") + return None + elif platform in PROVISIONING: + LOG.debug("Cloud-init is disabled during provisioning: %s.", + platform) + return None + + ret = {'platform': platform, 'source': path, + 'system-uuid': _read_system_uuid()} + + try: + if os.path.isdir(path): + results = metadata_from_dir(path) + else: + results = util.mount_cb(path, metadata_from_dir) + except BrokenMetadata as e: + raise RuntimeError( + "Failed reading IBM config disk (platform=%s path=%s): %s" % + (platform, path, e)) + + ret.update(results) + return ret + + +class BrokenMetadata(IOError): + pass + + +def metadata_from_dir(source_dir): + """Walk source_dir extracting standardized metadata. + + Certain metadata keys are renamed to present a standardized set of metadata + keys. 
+ + This function has a lot in common with ConfigDriveReader.read_v2 but + there are a number of inconsistencies, such key renames and as only + presenting a 'latest' version which make it an unlikely candidate to share + code. + + @return: Dict containing translated metadata, userdata, vendordata, + networkdata as present. + """ + + def opath(fname): + return os.path.join("openstack", "latest", fname) + + def load_json_bytes(blob): + return json.loads(blob.decode('utf-8')) + + files = [ + # tuples of (results_name, path, translator) + ('metadata_raw', opath('meta_data.json'), load_json_bytes), + ('userdata', opath('user_data'), None), + ('vendordata', opath('vendor_data.json'), load_json_bytes), + ('networkdata', opath('network_data.json'), load_json_bytes), + ] + + results = {} + for (name, path, transl) in files: + fpath = os.path.join(source_dir, path) + raw = None + try: + raw = util.load_file(fpath, decode=False) + except IOError as e: + LOG.debug("Failed reading path '%s': %s", fpath, e) + + if raw is None or transl is None: + data = raw + else: + try: + data = transl(raw) + except Exception as e: + raise BrokenMetadata("Failed decoding %s: %s" % (path, e)) + + results[name] = data + + if results.get('metadata_raw') is None: + raise BrokenMetadata( + "%s missing required file 'meta_data.json'" % source_dir) + + results['metadata'] = {} + + md_raw = results['metadata_raw'] + md = results['metadata'] + if 'random_seed' in md_raw: + try: + md['random_seed'] = base64.b64decode(md_raw['random_seed']) + except (ValueError, TypeError) as e: + raise BrokenMetadata( + "Badly formatted metadata random_seed entry: %s" % e) + + renames = ( + ('public_keys', 'public-keys'), ('hostname', 'local-hostname'), + ('uuid', 'instance-id')) + for mdname, newname in renames: + if mdname in md_raw: + md[newname] = md_raw[mdname] + + return results + + +# Used to match classes to dependencies +datasources = [ + (DataSourceIBMCloud, (sources.DEP_FILESYSTEM,)), +] + + +# Return a list 
of data sources that match this set of dependencies +def get_datasource_list(depends): + return sources.list_from_depends(depends, datasources) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description='Query IBM Cloud Metadata') + args = parser.parse_args() + data = read_md() + print(util.json_dumps(data)) + +# vi: ts=4 expandtab diff --git a/cloudinit/tests/test_util.py b/cloudinit/tests/test_util.py index d30643dc..3f37dbb6 100644 --- a/cloudinit/tests/test_util.py +++ b/cloudinit/tests/test_util.py @@ -3,6 +3,7 @@ """Tests for cloudinit.util""" import logging +from textwrap import dedent import cloudinit.util as util @@ -140,4 +141,75 @@ class TestGetHostnameFqdn(CiTestCase): [{'fqdn': True, 'metadata_only': True}, {'metadata_only': True}], mycloud.calls) + +class TestBlkid(CiTestCase): + ids = { + "id01": "1111-1111", + "id02": "22222222-2222", + "id03": "33333333-3333", + "id04": "44444444-4444", + "id05": "55555555-5555-5555-5555-555555555555", + "id06": "66666666-6666-6666-6666-666666666666", + "id07": "52894610484658920398", + "id08": "86753098675309867530", + "id09": "99999999-9999-9999-9999-999999999999", + } + + blkid_out = dedent("""\ + /dev/loop0: TYPE="squashfs" + /dev/loop1: TYPE="squashfs" + /dev/loop2: TYPE="squashfs" + /dev/loop3: TYPE="squashfs" + /dev/sda1: UUID="{id01}" TYPE="vfat" PARTUUID="{id02}" + /dev/sda2: UUID="{id03}" TYPE="ext4" PARTUUID="{id04}" + /dev/sda3: UUID="{id05}" TYPE="ext4" PARTUUID="{id06}" + /dev/sda4: LABEL="default" UUID="{id07}" UUID_SUB="{id08}" """ + """TYPE="zfs_member" PARTUUID="{id09}" + /dev/loop4: TYPE="squashfs" + """) + + maxDiff = None + + def _get_expected(self): + return ({ + "/dev/loop0": {"DEVNAME": "/dev/loop0", "TYPE": "squashfs"}, + "/dev/loop1": {"DEVNAME": "/dev/loop1", "TYPE": "squashfs"}, + "/dev/loop2": {"DEVNAME": "/dev/loop2", "TYPE": "squashfs"}, + "/dev/loop3": {"DEVNAME": "/dev/loop3", "TYPE": "squashfs"}, + "/dev/loop4": {"DEVNAME": "/dev/loop4", 
"TYPE": "squashfs"}, + "/dev/sda1": {"DEVNAME": "/dev/sda1", "TYPE": "vfat", + "UUID": self.ids["id01"], + "PARTUUID": self.ids["id02"]}, + "/dev/sda2": {"DEVNAME": "/dev/sda2", "TYPE": "ext4", + "UUID": self.ids["id03"], + "PARTUUID": self.ids["id04"]}, + "/dev/sda3": {"DEVNAME": "/dev/sda3", "TYPE": "ext4", + "UUID": self.ids["id05"], + "PARTUUID": self.ids["id06"]}, + "/dev/sda4": {"DEVNAME": "/dev/sda4", "TYPE": "zfs_member", + "LABEL": "default", + "UUID": self.ids["id07"], + "UUID_SUB": self.ids["id08"], + "PARTUUID": self.ids["id09"]}, + }) + + @mock.patch("cloudinit.util.subp") + def test_functional_blkid(self, m_subp): + m_subp.return_value = ( + self.blkid_out.format(**self.ids), "") + self.assertEqual(self._get_expected(), util.blkid()) + m_subp.assert_called_with(["blkid", "-o", "full"], capture=True, + decode="replace") + + @mock.patch("cloudinit.util.subp") + def test_blkid_no_cache_uses_no_cache(self, m_subp): + """blkid should turn off cache if disable_cache is true.""" + m_subp.return_value = ( + self.blkid_out.format(**self.ids), "") + self.assertEqual(self._get_expected(), + util.blkid(disable_cache=True)) + m_subp.assert_called_with(["blkid", "-o", "full", "-c", "/dev/null"], + capture=True, decode="replace") + + # vi: ts=4 expandtab diff --git a/cloudinit/util.py b/cloudinit/util.py index cae8b196..fb4ee5fe 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1237,6 +1237,37 @@ def find_devs_with(criteria=None, oformat='device', return entries +def blkid(devs=None, disable_cache=False): + """Get all device tags details from blkid. + + @param devs: Optional list of device paths you wish to query. + @param disable_cache: Bool, set True to start with clean cache. + + @return: Dict of key value pairs of info for the device. 
+ """ + if devs is None: + devs = [] + else: + devs = list(devs) + + cmd = ['blkid', '-o', 'full'] + if disable_cache: + cmd.extend(['-c', '/dev/null']) + cmd.extend(devs) + + # we have to decode with 'replace' as shelx.split (called by + # load_shell_content) can't take bytes. So this is potentially + # lossy of non-utf-8 chars in blkid output. + out, _ = subp(cmd, capture=True, decode="replace") + ret = {} + for line in out.splitlines(): + dev, _, data = line.partition(":") + ret[dev] = load_shell_content(data) + ret[dev]["DEVNAME"] = dev + + return ret + + def peek_file(fname, max_bytes): LOG.debug("Peeking at %s (max_bytes=%s)", fname, max_bytes) with open(fname, 'rb') as ifh: diff --git a/tests/unittests/test_datasource/test_ibmcloud.py b/tests/unittests/test_datasource/test_ibmcloud.py new file mode 100644 index 00000000..621cfe49 --- /dev/null +++ b/tests/unittests/test_datasource/test_ibmcloud.py @@ -0,0 +1,262 @@ +# This file is part of cloud-init. See LICENSE file for license information. + +from cloudinit.sources import DataSourceIBMCloud as ibm +from cloudinit.tests import helpers as test_helpers + +import base64 +import copy +import json +import mock +from textwrap import dedent + +D_PATH = "cloudinit.sources.DataSourceIBMCloud." 
+ + +class TestIBMCloud(test_helpers.CiTestCase): + """Test the datasource.""" + def setUp(self): + super(TestIBMCloud, self).setUp() + pass + + +@mock.patch(D_PATH + "_is_xen", return_value=True) +@mock.patch(D_PATH + "_is_ibm_provisioning") +@mock.patch(D_PATH + "util.blkid") +class TestGetIBMPlatform(test_helpers.CiTestCase): + """Test the get_ibm_platform helper.""" + + blkid_base = { + "/dev/xvda1": { + "DEVNAME": "/dev/xvda1", "LABEL": "cloudimg-bootfs", + "TYPE": "ext3"}, + "/dev/xvda2": { + "DEVNAME": "/dev/xvda2", "LABEL": "cloudimg-rootfs", + "TYPE": "ext4"}, + } + + blkid_metadata_disk = { + "/dev/xvdh1": { + "DEVNAME": "/dev/xvdh1", "LABEL": "METADATA", "TYPE": "vfat", + "SEC_TYPE": "msdos", "UUID": "681B-8C5D", + "PARTUUID": "3d631e09-01"}, + } + + blkid_oscode_disk = { + "/dev/xvdh": { + "DEVNAME": "/dev/xvdh", "LABEL": "config-2", "TYPE": "vfat", + "SEC_TYPE": "msdos", "UUID": ibm.IBM_CONFIG_UUID} + } + + def setUp(self): + self.blkid_metadata = copy.deepcopy(self.blkid_base) + self.blkid_metadata.update(copy.deepcopy(self.blkid_metadata_disk)) + + self.blkid_oscode = copy.deepcopy(self.blkid_base) + self.blkid_oscode.update(copy.deepcopy(self.blkid_oscode_disk)) + + def test_id_template_live_metadata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_LIVE_METADATA.""" + m_blkid.return_value = self.blkid_metadata + m_is_prov.return_value = False + self.assertEqual( + (ibm.Platforms.TEMPLATE_LIVE_METADATA, "/dev/xvdh1"), + ibm.get_ibm_platform()) + + def test_id_template_prov_metadata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_PROVISIONING_METADATA.""" + m_blkid.return_value = self.blkid_metadata + m_is_prov.return_value = True + self.assertEqual( + (ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh1"), + ibm.get_ibm_platform()) + + def test_id_template_prov_nodata(self, m_blkid, m_is_prov, _m_xen): + """identify TEMPLATE_PROVISIONING_NODATA.""" + m_blkid.return_value = self.blkid_base + m_is_prov.return_value = True + 
self.assertEqual( + (ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None), + ibm.get_ibm_platform()) + + def test_id_os_code(self, m_blkid, m_is_prov, _m_xen): + """Identify OS_CODE.""" + m_blkid.return_value = self.blkid_oscode + m_is_prov.return_value = False + self.assertEqual((ibm.Platforms.OS_CODE, "/dev/xvdh"), + ibm.get_ibm_platform()) + + def test_id_os_code_must_match_uuid(self, m_blkid, m_is_prov, _m_xen): + """Test against false positive on openstack with non-ibm UUID.""" + blkid = self.blkid_oscode + blkid["/dev/xvdh"]["UUID"] = "9999-9999" + m_blkid.return_value = blkid + m_is_prov.return_value = False + self.assertEqual((None, None), ibm.get_ibm_platform()) + + +@mock.patch(D_PATH + "_read_system_uuid", return_value=None) +@mock.patch(D_PATH + "get_ibm_platform") +class TestReadMD(test_helpers.CiTestCase): + """Test the read_datasource helper.""" + + template_md = { + "files": [], + "network_config": {"content_path": "/content/interfaces"}, + "hostname": "ci-fond-ram", + "name": "ci-fond-ram", + "domain": "testing.ci.cloud-init.org", + "meta": {"dsmode": "net"}, + "uuid": "8e636730-9f5d-c4a5-327c-d7123c46e82f", + "public_keys": {"1091307": "ssh-rsa AAAAB3NzaC1...Hw== ci-pubkey"}, + } + + oscode_md = { + "hostname": "ci-grand-gannet.testing.ci.cloud-init.org", + "name": "ci-grand-gannet", + "uuid": "2f266908-8e6c-4818-9b5c-42e9cc66a785", + "random_seed": "bm90LXJhbmRvbQo=", + "crypt_key": "ssh-rsa AAAAB3NzaC1yc2..n6z/", + "configuration_token": "eyJhbGciOi..M3ZA", + "public_keys": {"1091307": "ssh-rsa AAAAB3N..Hw== ci-pubkey"}, + } + + content_interfaces = dedent("""\ + auto lo + iface lo inet loopback + + auto eth0 + allow-hotplug eth0 + iface eth0 inet static + address 10.82.43.5 + netmask 255.255.255.192 + """) + + userdata = b"#!/bin/sh\necho hi mom\n" + # meta.js file gets json encoded userdata as a list. 
+ meta_js = '["#!/bin/sh\necho hi mom\n"]' + vendor_data = { + "cloud-init": "#!/bin/bash\necho 'root:$6$5ab01p1m1' | chpasswd -e"} + + network_data = { + "links": [ + {"id": "interface_29402281", "name": "eth0", "mtu": None, + "type": "phy", "ethernet_mac_address": "06:00:f1:bd:da:25"}, + {"id": "interface_29402279", "name": "eth1", "mtu": None, + "type": "phy", "ethernet_mac_address": "06:98:5e:d0:7f:86"} + ], + "networks": [ + {"id": "network_109887563", "link": "interface_29402281", + "type": "ipv4", "ip_address": "10.82.43.2", + "netmask": "255.255.255.192", + "routes": [ + {"network": "10.0.0.0", "netmask": "255.0.0.0", + "gateway": "10.82.43.1"}, + {"network": "161.26.0.0", "netmask": "255.255.0.0", + "gateway": "10.82.43.1"}]}, + {"id": "network_109887551", "link": "interface_29402279", + "type": "ipv4", "ip_address": "108.168.194.252", + "netmask": "255.255.255.248", + "routes": [ + {"network": "0.0.0.0", "netmask": "0.0.0.0", + "gateway": "108.168.194.249"}]} + ], + "services": [ + {"type": "dns", "address": "10.0.80.11"}, + {"type": "dns", "address": "10.0.80.12"} + ], + } + + sysuuid = '7f79ebf5-d791-43c3-a723-854e8389d59f' + + def _get_expected_metadata(self, os_md): + """return expected 'metadata' for data loaded from meta_data.json.""" + os_md = copy.deepcopy(os_md) + renames = ( + ('hostname', 'local-hostname'), + ('uuid', 'instance-id'), + ('public_keys', 'public-keys')) + ret = {} + for osname, mdname in renames: + if osname in os_md: + ret[mdname] = os_md[osname] + if 'random_seed' in os_md: + ret['random_seed'] = base64.b64decode(os_md['random_seed']) + + return ret + + def test_provisioning_md(self, m_platform, m_sysuuid): + """Provisioning env with a metadata disk should return None.""" + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_PROVISIONING_METADATA, "/dev/xvdh") + self.assertIsNone(ibm.read_md()) + + def test_provisioning_no_metadata(self, m_platform, m_sysuuid): + """Provisioning env with no metadata disk should return None.""" 
+ m_platform.return_value = ( + ibm.Platforms.TEMPLATE_PROVISIONING_NODATA, None) + self.assertIsNone(ibm.read_md()) + + def test_provisioning_not_ibm(self, m_platform, m_sysuuid): + """Provisioning env but not identified as IBM should return None.""" + m_platform.return_value = (None, None) + self.assertIsNone(ibm.read_md()) + + def test_template_live(self, m_platform, m_sysuuid): + """Template live environment should be identified.""" + tmpdir = self.tmp_dir() + m_platform.return_value = ( + ibm.Platforms.TEMPLATE_LIVE_METADATA, tmpdir) + m_sysuuid.return_value = self.sysuuid + + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.template_md), + 'openstack/latest/user_data': self.userdata, + 'openstack/content/interfaces': self.content_interfaces, + 'meta.js': self.meta_js}) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.TEMPLATE_LIVE_METADATA, + ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertEqual(self.userdata, ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.template_md), + ret['metadata']) + self.assertEqual(self.sysuuid, ret['system-uuid']) + + def test_os_code_live(self, m_platform, m_sysuuid): + """Verify an os_code metadata path.""" + tmpdir = self.tmp_dir() + m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) + netdata = json.dumps(self.network_data) + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), + 'openstack/latest/user_data': self.userdata, + 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), + 'openstack/latest/network_data.json': netdata, + }) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertEqual(self.userdata, ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.oscode_md), + ret['metadata']) + + def test_os_code_live_no_userdata(self, m_platform, m_sysuuid): + """Verify 
os_code without user-data.""" + tmpdir = self.tmp_dir() + m_platform.return_value = (ibm.Platforms.OS_CODE, tmpdir) + test_helpers.populate_dir(tmpdir, { + 'openstack/latest/meta_data.json': json.dumps(self.oscode_md), + 'openstack/latest/vendor_data.json': json.dumps(self.vendor_data), + }) + + ret = ibm.read_md() + self.assertEqual(ibm.Platforms.OS_CODE, ret['platform']) + self.assertEqual(tmpdir, ret['source']) + self.assertIsNone(ret['userdata']) + self.assertEqual(self._get_expected_metadata(self.oscode_md), + ret['metadata']) + + +# vi: ts=4 expandtab diff --git a/tests/unittests/test_ds_identify.py b/tests/unittests/test_ds_identify.py index 85999b7a..53643989 100644 --- a/tests/unittests/test_ds_identify.py +++ b/tests/unittests/test_ds_identify.py @@ -9,6 +9,8 @@ from cloudinit import util from cloudinit.tests.helpers import ( CiTestCase, dir2dict, populate_dir) +from cloudinit.sources import DataSourceIBMCloud as dsibm + UNAME_MYSYS = ("Linux bart 4.4.0-62-generic #83-Ubuntu " "SMP Wed Jan 18 14:10:15 UTC 2017 x86_64 GNU/Linux") UNAME_PPC64EL = ("Linux diamond 4.4.0-83-generic #106-Ubuntu SMP " @@ -37,8 +39,8 @@ BLKID_UEFI_UBUNTU = [ POLICY_FOUND_ONLY = "search,found=all,maybe=none,notfound=disabled" POLICY_FOUND_OR_MAYBE = "search,found=all,maybe=all,notfound=disabled" -DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=enabled" -DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=disabled" +DI_DEFAULT_POLICY = "search,found=all,maybe=all,notfound=disabled" +DI_DEFAULT_POLICY_NO_DMI = "search,found=all,maybe=all,notfound=enabled" DI_EC2_STRICT_ID_DEFAULT = "true" OVF_MATCH_STRING = 'http://schemas.dmtf.org/ovf/environment/1' @@ -64,6 +66,9 @@ P_SYS_VENDOR = "sys/class/dmi/id/sys_vendor" P_SEED_DIR = "var/lib/cloud/seed" P_DSID_CFG = "etc/cloud/ds-identify.cfg" +IBM_PROVISIONING_CHECK_PATH = "/root/provisioningConfiguration.cfg" +IBM_CONFIG_UUID = "9796-932E" + MOCK_VIRT_IS_KVM = {'name': 'detect_virt', 'RET': 'kvm', 'ret': 0} 
MOCK_VIRT_IS_VMWARE = {'name': 'detect_virt', 'RET': 'vmware', 'ret': 0} MOCK_VIRT_IS_XEN = {'name': 'detect_virt', 'RET': 'xen', 'ret': 0} @@ -239,6 +244,57 @@ class TestDsIdentify(CiTestCase): self._test_ds_found('ConfigDriveUpper') return + def test_ibmcloud_template_userdata_in_provisioning(self): + """Template provisioned with user-data during provisioning stage. + + Template provisioning with user-data has METADATA disk, + datasource should return not found.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-metadata']) + data['files'] = {IBM_PROVISIONING_CHECK_PATH: 'xxx'} + return self._check_via_dict(data, RC_NOT_FOUND) + + def test_ibmcloud_template_userdata(self): + """Template provisioned with user-data first boot. + + Template provisioning with user-data has METADATA disk. + datasource should return found.""" + self._test_ds_found('IBMCloud-metadata') + + def test_ibmcloud_template_no_userdata_in_provisioning(self): + """Template provisioned with no user-data during provisioning. + + no disks attached. Datasource should return not found.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-nodisks']) + data['files'] = {IBM_PROVISIONING_CHECK_PATH: 'xxx'} + return self._check_via_dict(data, RC_NOT_FOUND) + + def test_ibmcloud_template_no_userdata(self): + """Template provisioned with no user-data first boot. + + no disks attached. Datasource should return found.""" + self._check_via_dict(VALID_CFG['IBMCloud-nodisks'], RC_NOT_FOUND) + + def test_ibmcloud_os_code(self): + """Launched by os code always has config-2 disk.""" + self._test_ds_found('IBMCloud-config-2') + + def test_ibmcloud_os_code_different_uuid(self): + """IBM cloud config-2 disks must be explicit match on UUID. 
+ + If the UUID is not 9796-932E then we actually expect ConfigDrive.""" + data = copy.deepcopy(VALID_CFG['IBMCloud-config-2']) + offset = None + for m, d in enumerate(data['mocks']): + if d.get('name') == "blkid": + offset = m + break + if not offset: + raise ValueError("Expected to find 'blkid' mock, but did not.") + data['mocks'][offset]['out'] = d['out'].replace(dsibm.IBM_CONFIG_UUID, + "DEAD-BEEF") + self._check_via_dict( + data, rc=RC_FOUND, dslist=['ConfigDrive', DS_NONE]) + def test_policy_disabled(self): """A Builtin policy of 'disabled' should return not found. @@ -452,7 +508,7 @@ VALID_CFG = { }, 'Ec2-xen': { 'ds': 'Ec2', - 'mocks': [{'name': 'detect_virt', 'RET': 'xen', 'ret': 0}], + 'mocks': [MOCK_VIRT_IS_XEN], 'files': { 'sys/hypervisor/uuid': 'ec2c6e2f-5fac-4fc7-9c82-74127ec14bbb\n' }, @@ -579,6 +635,48 @@ VALID_CFG = { 'ds': 'Hetzner', 'files': {P_SYS_VENDOR: 'Hetzner\n'}, }, + 'IBMCloud-metadata': { + 'ds': 'IBMCloud', + 'mocks': [ + MOCK_VIRT_IS_XEN, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'xvda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4()}, + {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'METADATA'}]), + }, + ], + }, + 'IBMCloud-config-2': { + 'ds': 'IBMCloud', + 'mocks': [ + MOCK_VIRT_IS_XEN, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'xvda1', 'TYPE': 'ext3', 'PARTUUID': uuid4(), + 'UUID': uuid4(), 'LABEL': 'cloudimg-bootfs'}, + {'DEVNAME': 'xvdb', 'TYPE': 'vfat', 'LABEL': 'config-2', + 'UUID': dsibm.IBM_CONFIG_UUID}, + {'DEVNAME': 'xvda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': uuid4(), + 'UUID': uuid4()}, + ]), + }, + ], + }, + 'IBMCloud-nodisks': { + 'ds': 'IBMCloud', + 'mocks': [ + MOCK_VIRT_IS_XEN, + {'name': 'blkid', 'ret': 0, + 'out': blkid_out( + [{'DEVNAME': 'xvda1', 'TYPE': 'vfat', 'PARTUUID': uuid4()}, + {'DEVNAME': 'xvda2', 'TYPE': 'ext4', + 'LABEL': 'cloudimg-rootfs', 'PARTUUID': 
uuid4()}]), + }, + ], + }, } # vi: ts=4 expandtab diff --git a/tools/ds-identify b/tools/ds-identify index e2552c8b..9a2db5c4 100755 --- a/tools/ds-identify +++ b/tools/ds-identify @@ -92,6 +92,7 @@ DI_DMI_SYS_VENDOR="" DI_DMI_PRODUCT_SERIAL="" DI_DMI_PRODUCT_UUID="" DI_FS_LABELS="" +DI_FS_UUIDS="" DI_ISO9660_DEVS="" DI_KERNEL_CMDLINE="" DI_VIRT="" @@ -114,7 +115,7 @@ DI_DSNAME="" # be searched if there is no setting found in config. DI_DSLIST_DEFAULT="MAAS ConfigDrive NoCloud AltCloud Azure Bigstep \ CloudSigma CloudStack DigitalOcean AliYun Ec2 GCE OpenNebula OpenStack \ -OVF SmartOS Scaleway Hetzner" +OVF SmartOS Scaleway Hetzner IBMCloud" DI_DSLIST="" DI_MODE="" DI_ON_FOUND="" @@ -123,6 +124,8 @@ DI_ON_NOTFOUND="" DI_EC2_STRICT_ID_DEFAULT="true" +_IS_IBM_CLOUD="" + error() { set -- "ERROR:" "$@"; debug 0 "$@" @@ -196,7 +199,7 @@ read_fs_info() { return fi local oifs="$IFS" line="" delim="," - local ret=0 out="" labels="" dev="" label="" ftype="" isodevs="" + local ret=0 out="" labels="" dev="" label="" ftype="" isodevs="" uuids="" out=$(blkid -c /dev/null -o export) || { ret=$? error "failed running [$ret]: blkid -c /dev/null -o export" @@ -219,12 +222,14 @@ read_fs_info() { LABEL=*) label="${line#LABEL=}"; labels="${labels}${line#LABEL=}${delim}";; TYPE=*) ftype=${line#TYPE=};; + UUID=*) uuids="${uuids}${line#UUID=}$delim";; esac done [ -n "$dev" -a "$ftype" = "iso9660" ] && isodevs="${isodevs} ${dev}=$label" DI_FS_LABELS="${labels%${delim}}" + DI_FS_UUIDS="${uuids%${delim}}" DI_ISO9660_DEVS="${isodevs# }" } @@ -437,14 +442,25 @@ dmi_sys_vendor_is() { [ "${DI_DMI_SYS_VENDOR}" = "$1" ] } -has_fs_with_label() { - local label="$1" - case ",${DI_FS_LABELS}," in - *,$label,*) return 0;; +has_fs_with_uuid() { + case ",${DI_FS_UUIDS}," in + *,$1,*) return 0;; esac return 1 } +has_fs_with_label() { + # has_fs_with_label(label1[ ,label2 ..]) + # return 0 if a there is a filesystem that matches any of the labels. 
+    # return 0 if there is a filesystem that matches any of the labels.
If the DMI system manufacturer (aka sys_vendor) is not 'Hetzner', then exit out of the datasource's get_data quickly. --- cloudinit/sources/DataSourceHetzner.py | 6 ++++++ tests/unittests/test_datasource/test_hetzner.py | 20 +++++++++++++++++++- 2 files changed, 25 insertions(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceHetzner.py b/cloudinit/sources/DataSourceHetzner.py index 769fe131..5c75b65b 100644 --- a/cloudinit/sources/DataSourceHetzner.py +++ b/cloudinit/sources/DataSourceHetzner.py @@ -44,6 +44,8 @@ class DataSourceHetzner(sources.DataSource): self.dsmode = sources.DSMODE_NETWORK def get_data(self): + if not on_hetzner(): + return False nic = cloudnet.find_fallback_nic() with cloudnet.EphemeralIPv4Network(nic, "169.254.0.1", 16, "169.254.255.255"): @@ -87,6 +89,10 @@ class DataSourceHetzner(sources.DataSource): return self._network_config +def on_hetzner(): + return util.read_dmi_data('system-manufacturer') == "Hetzner" + + # Used to match classes to dependencies datasources = [ (DataSourceHetzner, (sources.DEP_FILESYSTEM, )), diff --git a/tests/unittests/test_datasource/test_hetzner.py b/tests/unittests/test_datasource/test_hetzner.py index f1d1525e..a9c12597 100644 --- a/tests/unittests/test_datasource/test_hetzner.py +++ b/tests/unittests/test_datasource/test_hetzner.py @@ -73,7 +73,10 @@ class TestDataSourceHetzner(CiTestCase): @mock.patch('cloudinit.net.find_fallback_nic') @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') @mock.patch('cloudinit.sources.helpers.hetzner.read_userdata') - def test_read_data(self, m_usermd, m_readmd, m_fallback_nic, m_net): + @mock.patch('cloudinit.sources.DataSourceHetzner.on_hetzner') + def test_read_data(self, m_on_hetzner, m_usermd, m_readmd, m_fallback_nic, + m_net): + m_on_hetzner.return_value = True m_readmd.return_value = METADATA.copy() m_usermd.return_value = USERDATA m_fallback_nic.return_value = 'eth0' @@ -97,3 +100,18 @@ class 
TestDataSourceHetzner(CiTestCase): self.assertIsInstance(ds.get_public_ssh_keys(), list) self.assertEqual(ds.get_userdata_raw(), USERDATA) self.assertEqual(ds.get_vendordata_raw(), METADATA.get('vendor_data')) + + @mock.patch('cloudinit.sources.helpers.hetzner.read_metadata') + @mock.patch('cloudinit.net.find_fallback_nic') + @mock.patch('cloudinit.sources.DataSourceHetzner.on_hetzner') + def test_not_on_hetzner_returns_false(self, m_on_hetzner, m_find_fallback, + m_read_md): + """If helper 'on_hetzner' returns False, return False from get_data.""" + m_on_hetzner.return_value = False + ds = self.get_ds() + ret = ds.get_data() + + self.assertFalse(ret) + # These are a white box attempt to ensure it did not search. + m_find_fallback.assert_not_called() + m_read_md.assert_not_called() -- cgit v1.2.3