From d1e26fc118cdb641829fbe6b838ef46d4ab1f113 Mon Sep 17 00:00:00 2001
From: Kiril Vladimiroff
Date: Wed, 19 Feb 2014 10:45:53 +0200
Subject: Read user data encoded with base64

This allows users of CloudSigma's VMs to encode their user data with base64.
In order to do that they have to add the ``cloudinit-user-data`` field to
``base64_fields``. The latter is a comma-separated field listing all the meta
fields with base64 encoded values.
---
 cloudinit/sources/DataSourceCloudSigma.py | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index e734d7e5..79ced3f4 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -15,6 +15,7 @@
 #
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
+from base64 import b64decode
 import re

 from cloudinit import log as logging
@@ -60,7 +61,11 @@ class DataSourceCloudSigma(sources.DataSource):
         if dsmode == "disabled" or dsmode != self.dsmode:
             return False

+        base64_fields = server_meta.get('base64_fields', '').split(',')
         self.userdata_raw = server_meta.get('cloudinit-user-data', "")
+        if 'cloudinit-user-data' in base64_fields:
+            self.userdata_raw = b64decode(self.userdata_raw)
+
         self.metadata = server_context
         self.ssh_public_key = server_meta['ssh_public_key']

--
cgit v1.2.3
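For context, here is a minimal sketch of what a CloudSigma user would place in
the server meta for the change above to take effect. The field names
(``cloudinit-user-data`` and ``base64_fields``) come from the patch itself;
the ``prepare_meta`` helper and the client-side encoding step are illustrative
only and not part of cloud-init:

    from base64 import b64encode

    def prepare_meta(user_data):
        """Hypothetical helper: build CloudSigma server meta so that the
        patched datasource will base64-decode the user data on boot."""
        encoded = b64encode(user_data.encode("utf-8")).decode("ascii")
        return {
            # the value itself is base64 encoded ...
            "cloudinit-user-data": encoded,
            # ... and the field is listed in the comma-separated base64_fields
            "base64_fields": "cloudinit-user-data",
        }

    print(prepare_meta("#cloud-config\nhostname: example\n"))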
From e91fd55890922d9054523afab4d7e4b268c1be64 Mon Sep 17 00:00:00 2001
From: Ben Howard
Date: Tue, 18 Mar 2014 15:57:30 -0600
Subject: Windows Azure defines the ephemeral0 mount as being per-boot instead
 of per-instance.

Under a variety of circumstances, the ephemeral device may be presented as a
default device. This patch detects when that situation happens and triggers
the cloud-config modules disk-setup and mounts to run again.

Details of changes for cloudinit/sources/DataSourceAzure.py:
 - auto-detect the location of ephemeral0
 - check on each boot whether ephemeral0 is new
   - done via NTFS with a label of "Temporary Storage" and no files on it
 - if the device is mounted, the datasource will unmount it
 - if it is new, change mounts and disk-setup to run always for that boot only
---
 cloudinit/sources/DataSourceAzure.py | 98 ++++++++++++++++++++++++++++++++++--
 1 file changed, 94 insertions(+), 4 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index c7331da5..256e0539 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -18,12 +18,14 @@

 import base64
 import crypt
+import fnmatch
 import os
 import os.path
 import time
 from xml.dom import minidom

 from cloudinit import log as logging
+from cloudinit.settings import PER_ALWAYS
 from cloudinit import sources
 from cloudinit import util

@@ -53,14 +55,15 @@ BUILTIN_CLOUD_CONFIG = {
     'disk_setup': {
         'ephemeral0': {'table_type': 'mbr',
                        'layout': True,
-                       'overwrite': False}
-         },
+                       'overwrite': False},
+        },
     'fs_setup': [{'filesystem': 'ext4',
                   'device': 'ephemeral0.1',
-                  'replace_fs': 'ntfs'}]
+                  'replace_fs': 'ntfs'}],
 }

 DS_CFG_PATH = ['datasource', DS_NAME]
+DEF_EPHEMERAL_LABEL = 'Temporary Storage'


 class DataSourceAzureNet(sources.DataSource):
@@ -189,8 +192,17 @@ class DataSourceAzureNet(sources.DataSource):
                 LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)

         pubkeys = pubkeys_from_crt_files(fp_files)
-
         self.metadata['public-keys'] = pubkeys
+
+        found_ephemeral = find_ephemeral_disk()
+        if found_ephemeral:
+            self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
+            LOG.debug("using detected ephemeral0 of %s" % found_ephemeral)
+
+        cc_modules_override = support_new_ephemeral(self.sys_cfg)
+        if cc_modules_override:
+            self.cfg['cloud_config_modules'] = cc_modules_override
+
         return True

     def device_name_to_device(self, name):
@@ -200,6 +212,84 @@ class DataSourceAzureNet(sources.DataSource):
         return self.cfg


+def count_files(mp):
+    return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*'))
+
+
+def find_ephemeral_part():
+    """
+    Locate the default ephmeral0.1 device. This will be the first device
+    that has a LABEL of DEF_EPHEMERAL_LABEL and is a NTFS device. If Azure
+    gets more ephemeral devices, this logic will only identify the first
+    such device.
+    """
+    c_label_devs = util.find_devs_with("LABEL=%s" % DEF_EPHEMERAL_LABEL)
+    c_fstype_devs = util.find_devs_with("TYPE=ntfs")
+    for dev in c_label_devs:
+        if dev in c_fstype_devs:
+            return dev
+    return None
+
+
+def find_ephemeral_disk():
+    """
+    Get the ephemeral disk.
+    """
+    part_dev = find_ephemeral_part()
+    if part_dev and str(part_dev[-1]).isdigit():
+        return part_dev[:-1]
+    elif part_dev:
+        return part_dev
+    return None
+
+
+def support_new_ephemeral(cfg):
+    """
+    Windows Azure makes ephemeral devices ephemeral to boot; a ephemeral device
+    may be presented as a fresh device, or not.
+
+    Since the knowledge of when a disk is supposed to be plowed under is specific
+    to Windows Azure, the logic resides here in the datasource. When a new ephemeral
+    device is detected, cloud-init overrides the default frequency for both disk-setup
+    and mounts for the current boot only.
+    """
+    device = find_ephemeral_part()
+    if not device:
+        LOG.debug("no default fabric formated ephemeral0.1 found")
+        return None
+    LOG.debug("fabric formated ephemeral0.1 device at %s" % device)
+
+    file_count = 0
+    try:
+        file_count = util.mount_cb(device, count_files)
+    except:
+        return None
+    LOG.debug("fabric prepared ephmeral0.1 has %s files on it" % file_count)
+
+    if file_count >= 1:
+        LOG.debug("fabric prepared ephemeral0.1 will be preserved")
+        return None
+    else:
+        with util.unmounter(device):
+            LOG.debug("unmounted fabric prepared ephemeral0.1")
+
+    LOG.debug("cloud-init will format ephemeral0.1 this boot.")
+    LOG.debug("setting disk_setup and mounts modules 'always' for this boot")
+
+    cc_modules = cfg.get('cloud_config_modules')
+    if cc_modules:
+        mod_list = []
+        for mod in cc_modules:
+            if mod in ("disk_setup", "mounts"):
+                mod_list.append([mod, PER_ALWAYS])
+                LOG.debug("set module '%s' to 'always' for this boot" % mod)
+            else:
+                mod_list.append(mod)
+        return mod_list
+
+    return None
+
+
 def handle_set_hostname(enabled, hostname, cfg):
     if not util.is_true(enabled):
         return
--
cgit v1.2.3
From 2025632daf5b202dbe6424a112d8689a1f93d9ac Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 19 Mar 2014 13:29:27 -0400
Subject: minor changes: be more careful about umount and warn on fail

---
 cloudinit/sources/DataSourceAzure.py | 29 ++++++++++++++++-------------
 1 file changed, 16 insertions(+), 13 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 256e0539..ffb4ff87 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -269,25 +269,28 @@ def support_new_ephemeral(cfg):
     if file_count >= 1:
         LOG.debug("fabric prepared ephemeral0.1 will be preserved")
         return None
-    else:
-        with util.unmounter(device):
-            LOG.debug("unmounted fabric prepared ephemeral0.1")
+    elif device in util.mounted():
+        try:
+            util.subp(['umount', device])
+        except util.ProcessExecutionError as e:
+            LOG.warn("Failed to unmount %s, will not reformat", device)
+            return None

     LOG.debug("cloud-init will format ephemeral0.1 this boot.")
     LOG.debug("setting disk_setup and mounts modules 'always' for this boot")

     cc_modules = cfg.get('cloud_config_modules')
-    if cc_modules:
-        mod_list = []
-        for mod in cc_modules:
-            if mod in ("disk_setup", "mounts"):
-                mod_list.append([mod, PER_ALWAYS])
-                LOG.debug("set module '%s' to 'always' for this boot" % mod)
-            else:
-                mod_list.append(mod)
-        return mod_list
+    if not cc_modules:
+        return None

-    return None
+    mod_list = []
+    for mod in cc_modules:
+        if mod in ("disk_setup", "mounts"):
+            mod_list.append([mod, PER_ALWAYS])
+            LOG.debug("set module '%s' to 'always' for this boot" % mod)
+        else:
+            mod_list.append(mod)
+    return mod_list


 def handle_set_hostname(enabled, hostname, cfg):
--
cgit v1.2.3

From 47019b77b23c72cd2e71098c01c4d86b06d1de8c Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Wed, 19 Mar 2014 13:38:37 -0400
Subject: change to unmount then check to address possible race

---
 cloudinit/sources/DataSourceAzure.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index ffb4ff87..39b8f4f6 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -269,7 +269,18 @@ def support_new_ephemeral(cfg):
     if file_count >= 1:
         LOG.debug("fabric prepared ephemeral0.1 will be preserved")
         return None
-    elif device in util.mounted():
+    else:
+        # if device was already mounted, then we need to unmount it
+        # race conditions could allow for a check-then-unmount
+        # to have a false positive. so just unmount and then check.
+        try:
+            util.subp(['umount', device])
+        except util.ProcessExecutionError as e:
+            if device in util.mounts():
+                LOG.warn("Failed to unmount %s, will not reformat", device)
+                return None
+
+    if device in util.mounts():
         try:
             util.subp(['umount', device])
         except util.ProcessExecutionError as e:
--
cgit v1.2.3
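The ordering in that last commit is the interesting part: checking the mount
table first and only then unmounting leaves a window in which another process
can change the mount state, so the code unmounts unconditionally and consults
the mount table only to decide whether a failure matters. A generic,
illustrative sketch of that pattern (using the standard library rather than
cloud-init's util.subp()/util.mounts(), with a simplistic /proc/mounts check):

    import subprocess

    def is_mounted(device):
        # Simplistic stand-in for cloud-init's util.mounts().
        with open('/proc/mounts') as fp:
            return any(line.split()[0] == device for line in fp)

    def unmount_then_check(device):
        """Unmount first; treat the failure as fatal only if the device is
        in fact still mounted afterwards."""
        try:
            subprocess.check_call(['umount', device])
        except subprocess.CalledProcessError:
            if is_mounted(device):
                return False    # really could not unmount it
        return True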
""" device = find_ephemeral_part() if not device: LOG.debug("no default fabric formated ephemeral0.1 found") return None - LOG.debug("fabric formated ephemeral0.1 device at %s" % device) + LOG.debug("fabric formated ephemeral0.1 device at %s", device) file_count = 0 try: file_count = util.mount_cb(device, count_files) except: return None - LOG.debug("fabric prepared ephmeral0.1 has %s files on it" % file_count) + LOG.debug("fabric prepared ephmeral0.1 has %s files on it", file_count) if file_count >= 1: LOG.debug("fabric prepared ephemeral0.1 will be preserved") @@ -277,15 +277,9 @@ def support_new_ephemeral(cfg): util.subp(['umount', device]) except util.ProcessExecutionError as e: if device in util.mounts(): - LOG.warn("Failed to unmount %s, will not reformat", device) + LOG.warn("Failed to unmount %s, will not reformat.", device) + LOG.debug("Failed umount: %s", e) return None - - if device in util.mounts(): - try: - util.subp(['umount', device]) - except util.ProcessExecutionError as e: - LOG.warn("Failed to unmount %s, will not reformat", device) - return None LOG.debug("cloud-init will format ephemeral0.1 this boot.") LOG.debug("setting disk_setup and mounts modules 'always' for this boot") @@ -298,7 +292,7 @@ def support_new_ephemeral(cfg): for mod in cc_modules: if mod in ("disk_setup", "mounts"): mod_list.append([mod, PER_ALWAYS]) - LOG.debug("set module '%s' to 'always' for this boot" % mod) + LOG.debug("set module '%s' to 'always' for this boot", mod) else: mod_list.append(mod) return mod_list -- cgit v1.2.3 From 11d6dbfad89e3f9a56925f7671fa7ee3e86af918 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 20 Mar 2014 12:33:29 -0400 Subject: NoCloud: fix broken seedfrom on the kernel command line This was broken in the VendorData add. LP: #1295223 --- cloudinit/sources/DataSourceNoCloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py index 8dc96ab6..a315aae0 100644 --- a/cloudinit/sources/DataSourceNoCloud.py +++ b/cloudinit/sources/DataSourceNoCloud.py @@ -57,7 +57,7 @@ class DataSourceNoCloud(sources.DataSource): md = {} if parse_cmdline_data(self.cmdline_id, md): found.append("cmdline") - mydata.update(md) + mydata['meta-data'].update(md) except: util.logexc(LOG, "Unable to parse command line data") return False -- cgit v1.2.3 From 9486c1a1abacb9829e5ab172212d57c3735e35e0 Mon Sep 17 00:00:00 2001 From: Enol Fernandez Date: Tue, 25 Mar 2014 16:31:16 +0100 Subject: Added base64 decoding of user data for OpenNebula. --- cloudinit/sources/DataSourceOpenNebula.py | 12 +++++++++++ tests/unittests/test_datasource/test_opennebula.py | 25 ++++++++++++++++++++-- 2 files changed, 35 insertions(+), 2 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index b0464cbb..d91b80ab 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -4,11 +4,13 @@ # Copyright (C) 2012 Yahoo! Inc. 
# Copyright (C) 2012-2013 CERIT Scientific Cloud # Copyright (C) 2012-2013 OpenNebula.org +# Copyright (C) 2014 Consejo Superior de Investigaciones Cientificas # # Author: Scott Moser # Author: Joshua Harlow # Author: Vlastimil Holer # Author: Javier Fontan +# Author: Enol Fernandez # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as @@ -22,6 +24,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . +import base64 import os import pwd import re @@ -417,6 +420,15 @@ def read_context_disk_dir(source_dir, asuser=None): elif "USERDATA" in context: results['userdata'] = context["USERDATA"] + # b64decode user data if necessary (default) + if 'userdata' in results: + userdata_encoding = context.get('USERDATA_ENCODING', None) + if userdata_encoding in (None, "base64"): + try: + results['userdata'] = base64.b64decode(results['userdata']) + except TypeError: + LOG.warn("Failed base64 decoding of userdata") + # generate static /etc/network/interfaces # only if there are any required context variables # http://opennebula.org/documentation:rel3.8:cong#network_configuration diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index 6fc5b2ac..47e7acbc 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -4,6 +4,7 @@ from cloudinit import util from mocker import MockerTestCase from tests.unittests.helpers import populate_dir +from base64 import b64encode import os import pwd @@ -164,10 +165,30 @@ class TestOpenNebulaDataSource(MockerTestCase): public_keys.append(SSH_KEY % (c + 1,)) - def test_user_data(self): + def test_user_data_plain(self): for k in ('USER_DATA', 'USERDATA'): my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: USER_DATA}) + populate_context_dir(my_d, {k: USER_DATA, + 'USERDATA_ENCODING': ''}) + results = ds.read_context_disk_dir(my_d) + + self.assertTrue('userdata' in results) + self.assertEqual(USER_DATA, results['userdata']) + + def test_user_data_default_encoding(self): + for k in ('USER_DATA', 'USERDATA'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: b64encode(USER_DATA)}) + results = ds.read_context_disk_dir(my_d) + + self.assertTrue('userdata' in results) + self.assertEqual(USER_DATA, results['userdata']) + + def test_user_data_base64_encoding(self): + for k in ('USER_DATA', 'USERDATA'): + my_d = os.path.join(self.tmp, k) + populate_context_dir(my_d, {k: b64encode(USER_DATA), + 'USERDATA_ENCODING': 'base64'}) results = ds.read_context_disk_dir(my_d) self.assertTrue('userdata' in results) -- cgit v1.2.3 From 2ecefdf51cd93b593bea450b4d751021da91e748 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Thu, 27 Mar 2014 10:03:27 -0400 Subject: change 'default' encoding to be "None" Instead of just trying to see if userdata decodes as the indication that it should be encoded, the user must explicitly set this. The "just try it" will fail in the case where the user had other use of user-data and wanted a blob of data to go through unrecognized by cloud-init. In cases where there can be mistake in automatic behavior, and some users may be relaying on old behavior, its best to just require explicit use. 
--- cloudinit/sources/DataSourceOpenNebula.py | 5 +++-- tests/unittests/test_datasource/test_opennebula.py | 7 ++++--- 2 files changed, 7 insertions(+), 5 deletions(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py index d91b80ab..34557f8b 100644 --- a/cloudinit/sources/DataSourceOpenNebula.py +++ b/cloudinit/sources/DataSourceOpenNebula.py @@ -422,8 +422,9 @@ def read_context_disk_dir(source_dir, asuser=None): # b64decode user data if necessary (default) if 'userdata' in results: - userdata_encoding = context.get('USERDATA_ENCODING', None) - if userdata_encoding in (None, "base64"): + encoding = context.get('USERDATA_ENCODING', + context.get('USER_DATA_ENCODING')) + if encoding == "base64": try: results['userdata'] = base64.b64decode(results['userdata']) except TypeError: diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py index 47e7acbc..ec6b752b 100644 --- a/tests/unittests/test_datasource/test_opennebula.py +++ b/tests/unittests/test_datasource/test_opennebula.py @@ -175,14 +175,15 @@ class TestOpenNebulaDataSource(MockerTestCase): self.assertTrue('userdata' in results) self.assertEqual(USER_DATA, results['userdata']) - def test_user_data_default_encoding(self): + def test_user_data_encoding_required_for_decode(self): + b64userdata = b64encode(USER_DATA) for k in ('USER_DATA', 'USERDATA'): my_d = os.path.join(self.tmp, k) - populate_context_dir(my_d, {k: b64encode(USER_DATA)}) + populate_context_dir(my_d, {k: b64userdata}) results = ds.read_context_disk_dir(my_d) self.assertTrue('userdata' in results) - self.assertEqual(USER_DATA, results['userdata']) + self.assertEqual(b64userdata, results['userdata']) def test_user_data_base64_encoding(self): for k in ('USER_DATA', 'USERDATA'): -- cgit v1.2.3 From f7fa9d2aa9abd81b8f8b79b95bdb1fc0c10b5fe9 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Tue, 27 May 2014 10:17:18 -0600 Subject: Enable vendordata for CloudSigma (LP: #1303986) --- cloudinit/sources/DataSourceCloudSigma.py | 2 ++ tests/unittests/test_datasource/test_cloudsigma.py | 28 +++++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) (limited to 'cloudinit/sources') diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py index e1c7e566..ad2a044a 100644 --- a/cloudinit/sources/DataSourceCloudSigma.py +++ b/cloudinit/sources/DataSourceCloudSigma.py @@ -66,6 +66,8 @@ class DataSourceCloudSigma(sources.DataSource): self.userdata_raw = server_meta.get('cloudinit-user-data', "") if 'cloudinit-user-data' in base64_fields: self.userdata_raw = b64decode(self.userdata_raw) + if 'cloudinit' in server_context.get('vendor_data', {}): + self.vendordata_raw = server_context["vendor_data"]["cloudinit"] self.metadata = server_context self.ssh_public_key = server_meta['ssh_public_key'] diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py index adbb4afb..a1342a86 100644 --- a/tests/unittests/test_datasource/test_cloudsigma.py +++ b/tests/unittests/test_datasource/test_cloudsigma.py @@ -20,7 +20,11 @@ SERVER_CONTEXT = { "smp": 1, "tags": ["much server", "very performance"], "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e8890", - "vnc_password": "9e84d6cb49e46379" + "vnc_password": "9e84d6cb49e46379", + "vendor_data": { + "location": "zrh", + "cloudinit": "#cloud-config\n\n...", + } } @@ -68,3 +72,25 @@ class 
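Taken together, the two OpenNebula commits mean base64 decoding now happens
only when the context explicitly asks for it via USERDATA_ENCODING (or
USER_DATA_ENCODING) set to "base64". A small illustrative sketch of the
resulting behaviour -- the context dict below stands in for what
read_context_disk_dir() parses from the context disk, and the helper name is
made up:

    import base64

    def decode_userdata(context):
        """Decode user data only when explicitly marked as base64."""
        userdata = context.get('USERDATA', '')
        encoding = context.get('USERDATA_ENCODING',
                               context.get('USER_DATA_ENCODING'))
        if encoding == "base64":
            try:
                return base64.b64decode(userdata)
            # TypeError on Python 2; binascii.Error (a ValueError) on Python 3
            except (TypeError, ValueError):
                pass  # fall through and return the data untouched
        return userdata

    plain = {'USERDATA': '#cloud-config\n'}
    encoded = {'USERDATA': base64.b64encode(b'#cloud-config\n').decode('ascii'),
               'USERDATA_ENCODING': 'base64'}
    print(decode_userdata(plain))    # passed through unmodified
    print(decode_userdata(encoded))  # b'#cloud-config\n'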
From f7fa9d2aa9abd81b8f8b79b95bdb1fc0c10b5fe9 Mon Sep 17 00:00:00 2001
From: Ben Howard
Date: Tue, 27 May 2014 10:17:18 -0600
Subject: Enable vendordata for CloudSigma (LP: #1303986)

---
 cloudinit/sources/DataSourceCloudSigma.py          |  2 ++
 tests/unittests/test_datasource/test_cloudsigma.py | 28 +++++++++++++++++++++-
 2 files changed, 29 insertions(+), 1 deletion(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index e1c7e566..ad2a044a 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -66,6 +66,8 @@ class DataSourceCloudSigma(sources.DataSource):
         self.userdata_raw = server_meta.get('cloudinit-user-data', "")
         if 'cloudinit-user-data' in base64_fields:
             self.userdata_raw = b64decode(self.userdata_raw)
+        if 'cloudinit' in server_context.get('vendor_data', {}):
+            self.vendordata_raw = server_context["vendor_data"]["cloudinit"]

         self.metadata = server_context
         self.ssh_public_key = server_meta['ssh_public_key']
diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py
index adbb4afb..a1342a86 100644
--- a/tests/unittests/test_datasource/test_cloudsigma.py
+++ b/tests/unittests/test_datasource/test_cloudsigma.py
@@ -20,7 +20,11 @@ SERVER_CONTEXT = {
     "smp": 1,
     "tags": ["much server", "very performance"],
     "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e8890",
-    "vnc_password": "9e84d6cb49e46379"
+    "vnc_password": "9e84d6cb49e46379",
+    "vendor_data": {
+        "location": "zrh",
+        "cloudinit": "#cloud-config\n\n...",
+    }
 }


@@ -68,3 +72,25 @@ class DataSourceCloudSigmaTest(TestCase):
         self.datasource.get_data()

         self.assertEqual(self.datasource.userdata_raw, b'hi world\n')
+
+    def test_vendor_data(self):
+        self.assertEqual(self.datasource.vendordata_raw,
+                         SERVER_CONTEXT['vendor_data']['cloudinit'])
+
+    def test_lack_of_vendor_data(self):
+        stripped_context = copy.deepcopy(SERVER_CONTEXT)
+        del stripped_context["vendor_data"]
+        self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "")
+        self.datasource.cepko = CepkoMock(stripped_context)
+        self.datasource.get_data()
+
+        self.assertIsNone(self.datasource.vendordata_raw)
+
+    def test_lack_of_cloudinit_key_in_vendor_data(self):
+        stripped_context = copy.deepcopy(SERVER_CONTEXT)
+        del stripped_context["vendor_data"]["cloudinit"]
+        self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "")
+        self.datasource.cepko = CepkoMock(stripped_context)
+        self.datasource.get_data()
+
+        self.assertIsNone(self.datasource.vendordata_raw)
--
cgit v1.2.3

From 71d817c427f06e9e1f5d547d5db191e541963d31 Mon Sep 17 00:00:00 2001
From: Kiril Vladimiroff
Date: Fri, 30 May 2014 14:19:10 +0300
Subject: Use dmidecode to detect if cloud-init runs in CloudSigma's infrastructure

---
 cloudinit/sources/DataSourceCloudSigma.py          | 22 ++++++++++++++++++++++
 tests/unittests/test_datasource/test_cloudsigma.py |  1 +
 2 files changed, 23 insertions(+)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index e1c7e566..fffff91e 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -20,6 +20,7 @@ import re

 from cloudinit import log as logging
 from cloudinit import sources
+from cloudinit import util
 from cloudinit.cs_utils import Cepko

 LOG = logging.getLogger(__name__)
@@ -40,12 +41,33 @@ class DataSourceCloudSigma(sources.DataSource):
         self.ssh_public_key = ''
         sources.DataSource.__init__(self, sys_cfg, distro, paths)

+    def is_running_in_cloudsigma(self):
+        """
+        Uses dmidecode to detect if this instance of cloud-init is running
+        in the CloudSigma's infrastructure.
+        """
+        dmidecode_path = util.which('dmidecode')
+        if not dmidecode_path:
+            return False
+
+        LOG.debug("Determining hypervisor product name via dmidecode")
+        try:
+            system_product_name, _ = util.subp([dmidecode_path, "-s", "system-product-name"])
+            return 'cloudsigma' in system_product_name.lower()
+        except:
+            LOG.exception("Failed to get hypervisor product name")
+
+        return False
+
     def get_data(self):
         """
         Metadata is the whole server context and /meta/cloud-config is used
         as userdata.
         """
         dsmode = None
+        if not self.is_running_in_cloudsigma():
+            return False
+
         try:
             server_context = self.cepko.all().result
             server_meta = server_context['meta']
diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py
index adbb4afb..25dc12f3 100644
--- a/tests/unittests/test_datasource/test_cloudsigma.py
+++ b/tests/unittests/test_datasource/test_cloudsigma.py
@@ -35,6 +35,7 @@ class CepkoMock(Cepko):
 class DataSourceCloudSigmaTest(TestCase):
     def setUp(self):
         self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "")
+        self.datasource.is_running_in_cloudsigma = lambda: True
         self.datasource.cepko = CepkoMock(SERVER_CONTEXT)
         self.datasource.get_data()
--
cgit v1.2.3
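The "system-product-name" keyword comes straight from the patch above; for
readers who want to see the same check outside of cloud-init, here is an
illustrative stand-in that uses the standard library instead of util.which()
and util.subp() (running dmidecode requires root and a machine exposing DMI
data):

    import shutil
    import subprocess

    def running_on_cloudsigma():
        """Best-effort check of the DMI system product name."""
        dmidecode = shutil.which('dmidecode')  # rough equivalent of util.which
        if not dmidecode:
            return False
        try:
            out = subprocess.check_output(
                [dmidecode, '-s', 'system-product-name'])
        except (subprocess.CalledProcessError, OSError):
            return False
        return 'cloudsigma' in out.decode(errors='replace').lower()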
From 2d36a7ce4a0ccec3bd2881dd99d6d5012a85fe3c Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Fri, 30 May 2014 14:46:53 -0400
Subject: minor cleanups.

 * do not run dmidecode on arm.
 * line length
 * comment that the 60 second timeout is expected
---
 cloudinit/cs_utils.py                     |  2 ++
 cloudinit/sources/DataSourceCloudSigma.py | 12 ++++++++++--
 2 files changed, 12 insertions(+), 2 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py
index 1db3f110..dcf56431 100644
--- a/cloudinit/cs_utils.py
+++ b/cloudinit/cs_utils.py
@@ -35,8 +35,10 @@ import platform

 import serial

+# these high timeouts are necessary as read may read a lot of data.
 READ_TIMEOUT = 60
 WRITE_TIMEOUT = 10
+
 SERIAL_PORT = '/dev/ttyS1'
 if platform.system() == 'Windows':
     SERIAL_PORT = 'COM2'
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index fffff91e..a8c04d19 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -16,6 +16,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 from base64 import b64decode
+import os
 import re

 from cloudinit import log as logging
@@ -46,16 +47,23 @@ class DataSourceCloudSigma(sources.DataSource):
         Uses dmidecode to detect if this instance of cloud-init is running
         in the CloudSigma's infrastructure.
         """
+        uname_arch = os.uname()[4]
+        if uname_arch.startswith("arm") or uname_arch == "aarch64":
+            # Disabling because dmidecode in CMD_DMI_SYSTEM crashes kvm process
+            LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
+            return False
+
         dmidecode_path = util.which('dmidecode')
         if not dmidecode_path:
             return False

         LOG.debug("Determining hypervisor product name via dmidecode")
         try:
-            system_product_name, _ = util.subp([dmidecode_path, "-s", "system-product-name"])
+            cmd = [dmidecode_path, "--string", "system-product-name"]
+            system_product_name, _ = util.subp(cmd)
             return 'cloudsigma' in system_product_name.lower()
         except:
-            LOG.exception("Failed to get hypervisor product name")
+            LOG.warn("Failed to get hypervisor product name via dmidecode")

         return False
--
cgit v1.2.3

From 2bb228751a223f21296ff9166b42583c670359a5 Mon Sep 17 00:00:00 2001
From: Scott Moser
Date: Mon, 2 Jun 2014 16:56:31 -0400
Subject: SmartOS test: do not require existence of /dev/ttyS1.

LP: #1316597
---
 ChangeLog                                       |  1 +
 cloudinit/sources/DataSourceSmartOS.py          | 10 ++++++++--
 tests/unittests/test_datasource/test_smartos.py |  1 +
 3 files changed, 10 insertions(+), 2 deletions(-)

(limited to 'cloudinit/sources')

diff --git a/ChangeLog b/ChangeLog
index 2dee548e..c455f469 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -3,6 +3,7 @@
  - Enable vendordata on CloudSigma datasource (LP: #1303986)
  - Poll on /dev/ttyS1 in CloudSigma datasource only if dmidecode says
    we're running on cloudsigma (LP: #1316475) [Kiril Vladimiroff]
+ - SmartOS test: do not require existance of /dev/ttyS1. [LP: #1316597]
 0.7.5:
  - open 0.7.5
  - Add a debug log message around import failures
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 7c1eb09a..65ec0339 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -170,8 +170,9 @@ class DataSourceSmartOS(sources.DataSource):
         md = {}
         ud = ""

-        if not os.path.exists(self.seed):
-            LOG.debug("Host does not appear to be on SmartOS")
+        if not device_exists(self.seed):
+            LOG.debug("No serial device '%s' found for SmartOS datasource",
+                      self.seed)
             return False

         uname_arch = os.uname()[4]
@@ -274,6 +275,11 @@ class DataSourceSmartOS(sources.DataSource):
                             b64=b64)


+def device_exists(device):
+    """Symplistic method to determine if the device exists or not"""
+    return os.path.exists(device)
+
+
 def get_serial(seed_device, seed_timeout):
     """This is replaced in unit testing, allowing us to replace
         serial.Serial with a mocked class.
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 45f1708a..f64aea07 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -171,6 +171,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         self.apply_patches([(mod, 'get_serial', _get_serial)])
         self.apply_patches([(mod, 'dmi_data', _dmi_data)])
         self.apply_patches([(os, 'uname', _os_uname)])
+        self.apply_patches([(mod, 'device_exists', lambda d: True)])
         dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None,
                                      paths=self.paths)
         return dsrc
--
cgit v1.2.3
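The point of the tiny device_exists() indirection is testability: the SmartOS
test suite can now pretend the serial seed device is present without a real
/dev/ttyS1, as the apply_patches() call in the test diff shows. Outside of
cloud-init's own test helpers, the same effect could be achieved with the
standard library's unittest.mock -- an illustrative sketch, assuming the
module is importable as shown:

    from unittest import mock

    from cloudinit.sources import DataSourceSmartOS as smartos

    # Pretend the serial seed device exists without touching /dev/ttyS1.
    with mock.patch.object(smartos, 'device_exists', return_value=True):
        assert smartos.device_exists('/dev/ttyS1') is True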