author     Scott Moser <smoser@ubuntu.com>  2014-02-14 00:15:08 -0500
committer  Scott Moser <smoser@ubuntu.com>  2014-02-14 00:15:08 -0500
commit     12672e77a2881f9a87d2dcb4217e5e56b8b3dfd6 (patch)
tree       311d2acd0e220ffbf316c7d0d75380f160a67518
parent     053667688d7c2ad51e569c62e00dac1942e46f62 (diff)
parent     1bf99b6fe9d11a9e3b1d452940d21779347ea461 (diff)
download   vyos-cloud-init-12672e77a2881f9a87d2dcb4217e5e56b8b3dfd6.tar.gz
           vyos-cloud-init-12672e77a2881f9a87d2dcb4217e5e56b8b3dfd6.zip
re-work vendor-data and smartos
This reduces how much cloud-init is explicitly involved in what "vendor-data" could accomplish. The goal of vendor-data is to give the vendor a channel for running arbitrary code that accommodates their specific platform. Much of that accommodation is currently done inside cloud-init; this change moves some of it into a default "vendor-data" instead of cloud-init proper.

Basically, there is now an 'sdc:vendor-data' key in the metadata. If it does not exist, cloud-init will use the default. The default provides a boothook. That boothook writes a file into /var/lib/cloud/scripts/per-boot/. The file is re-written on every boot and then executed in the rc.local time frame (by 'scripts-per-boot'). When run, it executes /var/lib/cloud/instance/data/user-script and /var/lib/cloud/instance/data/operator-script if they exist.

The things that cloud-init is still doing outside of the default vendor-data, which I would rather see done in vendor-data, are:
 * managing the population of instance/data/user-script and instance/data/operator-script. These could very easily be done from the boothook, but doing them in cloud-init removes the need for an 'mdata-get' command in the image (or some other way for the boothook script to query the datasource).
 * managing the LEGACY things.
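As a rough illustration of the fallback this introduces (a minimal sketch only; 'pick_vendor_data' is a made-up helper name, not code from the diff below, which does the same thing inline in the datasource):

    import os

    # Sketch (illustrative names): fall back to a builtin boothook template
    # when the platform supplies no 'sdc:vendor-data'.
    def pick_vendor_data(md, builtin_template, data_d, per_boot_d):
        if md.get('vendor-data'):
            return md['vendor-data']
        return builtin_template % {
            'user_script': os.path.join(data_d, 'user-script'),
            'operator_script': os.path.join(data_d, 'operator-script'),
            'per_boot_d': per_boot_d,
        }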
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py           |  72
-rw-r--r--  tests/unittests/test_datasource/test_smartos.py  |  51
2 files changed, 107 insertions(+), 16 deletions(-)
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 140c7814..ec561b0d 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -50,7 +50,8 @@ SMARTOS_ATTRIB_MAP = {
'iptables_disable': ('iptables_disable', True),
'motd_sys_info': ('motd_sys_info', True),
'availability_zone': ('sdc:datacenter_name', True),
- 'vendordata': ('sdc:operator-script', False),
+ 'vendor-data': ('sdc:vendor-data', False),
+ 'operator-script': ('sdc:operator-script', False),
}
DS_NAME = 'SmartOS'
@@ -95,6 +96,46 @@ BUILTIN_CLOUD_CONFIG = {
'device': 'ephemeral0'}],
}
+## builtin vendor-data is a boothook that writes a script into
+## /var/lib/cloud/scripts/per-boot. *That* script then handles
+## executing the 'operator-script' and 'user-script' files
+## that cloud-init writes into /var/lib/cloud/instance/data/
+## if they exist.
+##
+## This is all very indirect, but it's done like this so that at
+## some point in the future, perhaps cloud-init wouldn't do it at
+## all, but rather the vendor would actually provide vendor-data that
+## accomplishes their desires. (That is the point of vendor-data.)
+##
+## cloud-init does cheat a bit, and writes the operator-script and user-script
+## itself. It could have the vendor-data script do that, but it seems better
+## not to require the image to contain a tool (mdata-get) to read those
+## keys when we have a perfectly good one inside cloud-init.
+BUILTIN_VENDOR_DATA = """\
+#cloud-boothook
+#!/bin/sh
+fname="%(per_boot_d)s/01_smartos_vendor_data.sh"
+mkdir -p "${fname%%/*}"
+cat > "$fname" <<"END_SCRIPT"
+#!/bin/sh
+##
+# This file is written as part of the default vendor data for SmartOS.
+# The SmartOS datasource writes the listed file from the listed metadata key
+# sdc:operator-script -> %(operator_script)s
+# user-script -> %(user_script)s
+#
+# You can view content with 'mdata-get <key>'
+#
+for script in "%(operator_script)s" "%(user_script)s"; do
+ [ -x "$script" ] || continue
+ echo "executing '$script'" 1>&2
+ "$script"
+done
+END_SCRIPT
+chmod +x "$fname"
+"""
+
+
# @datadictionary: this is legacy path for placing files from metadata
# per the SmartOS location. It is not preferable, but is done for
# legacy reasons
@@ -120,8 +161,7 @@ class DataSourceSmartOS(sources.DataSource):
self.b64_keys = self.ds_cfg.get('base64_keys')
self.b64_all = self.ds_cfg.get('base64_all')
self.script_base_d = os.path.join(self.paths.get_cpath("scripts"))
- self.user_script_d = os.path.join(self.paths.get_cpath("scripts"),
- 'per-boot')
+ self.data_d = os.path.join(self.paths.instance_link, 'data')
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -140,7 +180,7 @@ class DataSourceSmartOS(sources.DataSource):
LOG.debug("No dmidata utility found")
return False
- system_uuid, system_type = dmi_info
+ system_uuid, system_type = tuple(dmi_info)
if 'smartdc' not in system_type.lower():
LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
return False
@@ -163,11 +203,18 @@ class DataSourceSmartOS(sources.DataSource):
# to a file in the filesystem of the guest on each boot and then
# executed. It may be of any format that would be considered
# executable in the guest instance.
- u_script = md.get('user-script')
- u_script_f = "%s/99_user_script" % self.user_script_d
+ #
+ # We write 'user-script' and 'operator-script' into the
+ # instance/data directory. The default vendor-data then handles
+ # executing them later.
+ user_script = os.path.join(self.data_d, 'user-script')
u_script_l = "%s/user-script" % LEGACY_USER_D
- write_boot_content(u_script, u_script_f, link=u_script_l, shebang=True,
- mode=0700)
+ write_boot_content(md.get('user-script'), content_f=user_script,
+ link=u_script_l, shebang=True, mode=0700)
+
+ operator_script = os.path.join(self.data_d, 'operator-script')
+ write_boot_content(md.get('operator-script'),
+ content_f=operator_script, shebang=False, mode=0700)
# @datadictionary: This key has no defined format, but its value
# is written to the file /var/db/mdata-user-data on each boot prior
@@ -186,9 +233,16 @@ class DataSourceSmartOS(sources.DataSource):
if md['user-data']:
ud = md['user-data']
+ if not md['vendor-data']:
+ md['vendor-data'] = BUILTIN_VENDOR_DATA % {
+ 'user_script': user_script,
+ 'operator_script': operator_script,
+ 'per_boot_d': os.path.join(self.paths.get_cpath("scripts"), 'per-boot'),
+ }
+
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
- self.vendordata_raw = md['vendordata']
+ self.vendordata_raw = md['vendor-data']
return True
def device_name_to_device(self, name):
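For reference, the builtin default added above can be rendered by hand to see the boothook it produces; this is only a sketch, with illustrative paths (cloud-init fills in the real values from its Paths object):

    # Render BUILTIN_VENDOR_DATA with example paths to inspect the boothook.
    # The paths below are illustrative; cloud-init supplies the real values.
    from cloudinit.sources import DataSourceSmartOS

    print(DataSourceSmartOS.BUILTIN_VENDOR_DATA % {
        'user_script': '/var/lib/cloud/instance/data/user-script',
        'operator_script': '/var/lib/cloud/instance/data/operator-script',
        'per_boot_d': '/var/lib/cloud/scripts/per-boot',
    })

Note that the doubled '%%' in the template collapses to a single '%' when rendered, so the shell sees the usual ${fname%/*} parameter expansion for the directory part.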
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index ae427bb5..19282bac 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -23,10 +23,12 @@
#
import base64
-from cloudinit import helpers
+from cloudinit import helpers as c_helpers
+from cloudinit import stages
+from cloudinit import util
from cloudinit.sources import DataSourceSmartOS
-
-from mocker import MockerTestCase
+from cloudinit.settings import (PER_INSTANCE)
+from tests.unittests import helpers
import os
import os.path
import re
@@ -42,6 +44,7 @@ MOCK_RETURNS = {
'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
'sdc:datacenter_name': 'somewhere2',
'sdc:operator-script': '\n'.join(['bin/true', '']),
+ 'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']),
'user-data': '\n'.join(['something', '']),
'user-script': '\n'.join(['/bin/true', '']),
}
@@ -105,22 +108,37 @@ class MockSerial(object):
yield '\n'
-class TestSmartOSDataSource(MockerTestCase):
+class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
def setUp(self):
+ helpers.FilesystemMockingTestCase.setUp(self)
+
# makeDir comes from MockerTestCase
self.tmp = self.makeDir()
self.legacy_user_d = self.makeDir()
+ # If you should want to watch the logs...
+ self._log = None
+ self._log_file = None
+ self._log_handler = None
+
# patch cloud_dir, so our 'seed_dir' is guaranteed empty
- self.paths = helpers.Paths({'cloud_dir': self.tmp})
+ self.paths = c_helpers.Paths({'cloud_dir': self.tmp})
self.unapply = []
super(TestSmartOSDataSource, self).setUp()
def tearDown(self):
+ helpers.FilesystemMockingTestCase.tearDown(self)
+ if self._log_handler and self._log:
+ self._log.removeHandler(self._log_handler)
apply_patches([i for i in reversed(self.unapply)])
super(TestSmartOSDataSource, self).tearDown()
+ def _patchIn(self, root):
+ self.restore()
+ self.patchOS(root)
+ self.patchUtils(root)
+
def apply_patches(self, patches):
ret = apply_patches(patches)
self.unapply += ret
@@ -327,10 +345,10 @@ class TestSmartOSDataSource(MockerTestCase):
there is no script remaining.
"""
- script_d = os.path.join(self.tmp, "scripts", "per-boot")
+ script_d = os.path.join(self.tmp, "instance", "data")
os.makedirs(script_d)
- test_script_f = "%s/99_user_script" % script_d
+ test_script_f = os.path.join(script_d, 'user-script')
with open(test_script_f, 'w') as f:
f.write("TEST DATA")
@@ -375,6 +393,25 @@ class TestSmartOSDataSource(MockerTestCase):
self.assertFalse(found_new)
+ def test_vendor_data_not_default(self):
+ dsrc = self._get_ds(mockdata=MOCK_RETURNS)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEquals(MOCK_RETURNS['sdc:vendor-data'],
+ dsrc.metadata['vendor-data'])
+
+ def test_default_vendor_data(self):
+ my_returns = MOCK_RETURNS.copy()
+ def_op_script = my_returns['sdc:vendor-data']
+ del my_returns['sdc:vendor-data']
+ dsrc = self._get_ds(mockdata=my_returns)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertNotEquals(def_op_script, dsrc.metadata['vendor-data'])
+
+ # we expect default vendor-data is a boothook
+ self.assertTrue(dsrc.vendordata_raw.startswith("#cloud-boothook"))
+
def test_disable_iptables_flag(self):
dsrc = self._get_ds(mockdata=MOCK_RETURNS)
ret = dsrc.get_data()
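To summarize the runtime behaviour the default vendor-data sets up, the per-boot helper it drops is roughly equivalent to the following Python (a paraphrase of the small /bin/sh script in BUILTIN_VENDOR_DATA; the default path is the usual cloud-init instance data location):

    import os
    import subprocess

    # Paraphrase of 01_smartos_vendor_data.sh: run the operator-script and
    # then the user-script, but only if they were written and are executable.
    def run_smartos_boot_scripts(data_d='/var/lib/cloud/instance/data'):
        for name in ('operator-script', 'user-script'):
            script = os.path.join(data_d, name)
            if not os.access(script, os.X_OK):
                continue
            print("executing '%s'" % script)
            subprocess.call([script])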