From a5727fe1477c9cc4288d1ac41f70bd1ab7d7928a Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Wed, 8 Jan 2014 17:16:24 -0700 Subject: Significant re-working of the userdata handling and introduction of vendordata. Vendordata is a datasource-provided, userdata-like blob that is parsed similarly to userdata, except at the user's pleasure. cloudinit/config/cc_scripts_vendor.py: added vendor script cloud config cloudinit/config/cc_vendor_scripts_per_boot.py: added vendor per boot cloud config cloudinit/config/cc_vendor_scripts_per_instance.py: added per instance vendor cloud config cloudinit/config/cc_vendor_scripts_per_once.py: added per once vendor cloud config script doc/examples/cloud-config-vendor-data.txt: documentation of vendor-data examples doc/vendordata.txt: documentation of vendordata for vendors (RENAMED) tests/unittests/test_userdata.py => tests/unittests/test_data.py: userdata test cases are now expanded to confirm that user data takes precedence over vendor data. bin/cloud-init: change instances of 'consume_userdata' to 'consume_data' cloudinit/handlers/cloud_config.py: Added vendor script handling to default cloud-config modules cloudinit/handlers/shell_script.py: Added ability to change the path key to support vendor-provided 'vendor-scripts'. Defaults to 'script'. cloudinit/helpers.py: - Changed ConfigMerger to include handling of vendordata. - Changed helpers to include paths for vendordata. cloudinit/sources/__init__.py: Added functions for helping vendordata - get_vendordata_raw(): returns vendordata unprocessed - get_vendordata(): returns vendordata through userdata processor - has_vendordata(): indicates whether vendordata is present - consume_vendordata(): datasource directive for indicating explicit user approval of vendordata consumption. Defaults to 'false' cloudinit/stages.py: Re-jiggered for handling of vendordata - _initial_subdirs(): added vendor script definition - update(): added self._store_vendordata() - [ADDED] _store_vendordata(): store vendordata - _get_default_handlers(): modified to allow for filtering which handlers will run against vendordata - [ADDED] _do_handlers(): moved logic from consume_userdata to _do_handlers(). This allows _consume_vendordata() and _consume_userdata() to use the same code path.
- [RENAMED] consume_userdata() to _consume_userdata() - [ADDED] _consume_vendordata() for handling vendordata - run after userdata to get user cloud-config - uses ConfigMerger to get the configuration from the instance perspective about whether or not to use vendordata - [ADDED] consume_data() to call _consume_{user,vendor}data cloudinit/util.py: - [ADDED] get_nested_option_as_list() used by cc_vendor* for getting a nested value from a dict and returned as a list - runparts(): added 'exe_prefix' for running exe with a prefix, used by cc_vendor* config/cloud.cfg: Added vendor script execution as default tests/unittests/test_runs/test_merge_run.py: changed consume_userdata() to consume_data() tests/unittests/test_runs/test_simple_run.py: changed consume_userdata() to consume_data() --- doc/examples/cloud-config-vendor-data.txt | 16 ++++++ doc/vendordata.txt | 93 +++++++++++++++++++++++++++++++ 2 files changed, 109 insertions(+) create mode 100644 doc/examples/cloud-config-vendor-data.txt create mode 100644 doc/vendordata.txt (limited to 'doc') diff --git a/doc/examples/cloud-config-vendor-data.txt b/doc/examples/cloud-config-vendor-data.txt new file mode 100644 index 00000000..7f90847b --- /dev/null +++ b/doc/examples/cloud-config-vendor-data.txt @@ -0,0 +1,16 @@ +#cloud-config +# +# This explains how to control vendordata via a cloud-config +# +# On select Datasources, vendors have a channel for the consumptions +# of all support user-data types via a special channel called +# vendordata. Users of the end system are given ultimate control. +# +vendor_data: + enabled: True + prefix: /usr/bin/ltrace + +# enabled: whether it is enabled or not +# prefix: the command to run before any vendor scripts. +# Note: this is a fairly weak method of containment. It should +# be used to profile a script, not to prevent its run diff --git a/doc/vendordata.txt b/doc/vendordata.txt new file mode 100644 index 00000000..63a6c999 --- /dev/null +++ b/doc/vendordata.txt @@ -0,0 +1,93 @@ +=== Overview === +Vendordata is data provided by the entity that launches an instance. +The cloud provider makes this data available to the instance via in one +way or another. + +Vendordata follows the same rules as user-data, with the following +caveauts: + 1. Users have ultimate control over vendordata + 2. By default it only runs on first boot + 3. Vendordata runs at the users pleasure. If the use of + vendordata is required for the instance to run, then + vendordata should not be used. + 4. Most vendor operations should be done either via script, + boot_hook or upstart job. + +Vendors utilizing the vendordata channel are strongly advised to +use the #cloud-config-jsonp method, otherwise they risk that a +user can accidently override choices. + +Further, we strongly advise vendors to not 'be evil'. By evil, we +mean any action that could compromise a system. Since users trust +you, please take care to make sure that any vendordata is safe, +atomic, indopenant and does not put your users at risk. + +cloud-init can read this input and act on it in different ways. + +=== Input Formats === +cloud-init will download and cache to filesystem any vendor-data that it +finds. However, certain types of vendor-data are handled specially. 
+ + * Gzip Compressed Content + content found to be gzip compressed will be uncompressed, and + these rules applied to the uncompressed data + + * Mime Multi Part archive + This list of rules is applied to each part of this multi-part file + Using a mime-multi part file, the user can specify more than one + type of data. For example, both a user data script and a + cloud-config type could be specified. + + * vendor-data Script + begins with: #! or Content-Type: text/x-shellscript + script will be executed at "rc.local-like" level during first boot. + rc.local-like means "very late in the boot sequence" + + * Include File + begins with #include or Content-Type: text/x-include-url + This content is a "include" file. The file contains a list of + urls, one per line. Each of the URLs will be read, and their content + will be passed through this same set of rules. Ie, the content + read from the URL can be gzipped, mime-multi-part, or plain text + +* Include File Once + begins with #include-once or Content-Type: text/x-include-once-url + This content is a "include" file. The file contains a list of + urls, one per line. Each of the URLs will be read, and their content + will be passed through this same set of rules. Ie, the content + read from the URL can be gzipped, mime-multi-part, or plain text + This file will just be downloaded only once per instance, and its + contents cached for subsequent boots. This allows you to pass in + one-time-use or expiring URLs. + + * Cloud Config Data + begins with #cloud-config or Content-Type: text/cloud-config + + This content is "cloud-config" data. See the examples for a + commented example of supported config formats. + + * Upstart Job + begins with #upstart-job or Content-Type: text/upstart-job + + Content is placed into a file in /etc/init, and will be consumed + by upstart as any other upstart job. + + * Cloud Boothook + begins with #cloud-boothook or Content-Type: text/cloud-boothook + + This content is "boothook" data. It is stored in a file under + /var/lib/cloud and then executed immediately. + + This is the earliest "hook" available. Note, that there is no + mechanism provided for running only once. The boothook must take + care of this itself. It is provided with the instance id in the + environment variable "INSTANCE_ID". This could be made use of to + provide a 'once-per-instance' + +=== Examples === +There are examples in the examples subdirectory. +Additionally, the 'tools' directory contains 'write-mime-multipart', +which can be used to easily generate mime-multi-part files from a list +of input files. That data can then be given to an instance. + +See 'write-mime-multipart --help' for usage. 
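A multi-part archive of the kind described above can also be assembled directly with the Python standard library. The sketch below builds a two-part vendor-data blob in roughly the way 'write-mime-multipart' does; the part contents, attachment filenames and output path are purely illustrative, while the content types are taken from the list above.

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    # Illustrative part contents only; any of the input formats listed
    # above could be used here.
    cloud_config = "\n".join([
        "#cloud-config",
        "runcmd:",
        " - [ sh, -c, 'echo vendor part ran > /tmp/vendor-ran' ]",
        "",
    ])
    boot_script = "\n".join([
        "#!/bin/sh",
        "echo 'vendor script ran' >> /var/log/vendor-script.log",
        "",
    ])

    archive = MIMEMultipart()
    for content, subtype, filename in (
            (cloud_config, "cloud-config", "vendor-cloud-config.txt"),
            (boot_script, "x-shellscript", "vendor-script.sh")):
        part = MIMEText(content, subtype)  # becomes Content-Type: text/<subtype>
        part.add_header("Content-Disposition", "attachment", filename=filename)
        archive.attach(part)

    # The assembled blob is what gets handed to the instance as vendor-data
    # (it may additionally be gzip compressed, per the rules above).
    with open("vendor-data.txt", "w") as fp:
        fp.write(archive.as_string())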
-- cgit v1.2.3 From 91d15b934b655fc56af416309ee020caac24ca3f Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Fri, 17 Jan 2014 08:27:09 -0700 Subject: pep8 and pylint fixes; typo fix for documentation --- cloudinit/stages.py | 11 +++++------ doc/vendordata.txt | 2 +- tests/unittests/test_data.py | 4 +--- 3 files changed, 7 insertions(+), 10 deletions(-) (limited to 'doc') diff --git a/cloudinit/stages.py b/cloudinit/stages.py index 5dced998..8ae0bc0d 100644 --- a/cloudinit/stages.py +++ b/cloudinit/stages.py @@ -26,8 +26,7 @@ import copy import os import sys -from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES, - CLOUD_CONFIG) +from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG) from cloudinit import handlers @@ -510,13 +509,13 @@ class Init(object): vdcfg = {'enabled': False} LOG.warn("invalid 'vendor_data' setting. resetting to: %s", vdcfg) - if not util.is_true(vdcfg.get('enabled')): - LOG.debug("vendordata consumption is disabled.") - return - enabled = vdcfg.get('enabled') no_handlers = vdcfg.get('disabled_handlers', None) + if not util.is_true(enabled): + LOG.debug("vendordata consumption is disabled.") + return + LOG.debug("vendor data will be consumed. disabled_handlers=%s", no_handlers) diff --git a/doc/vendordata.txt b/doc/vendordata.txt index 63a6c999..530e3b4c 100644 --- a/doc/vendordata.txt +++ b/doc/vendordata.txt @@ -20,7 +20,7 @@ user can accidently override choices. Further, we strongly advise vendors to not 'be evil'. By evil, we mean any action that could compromise a system. Since users trust you, please take care to make sure that any vendordata is safe, -atomic, indopenant and does not put your users at risk. +atomic, idempotent and does not put your users at risk. cloud-init can read this input and act on it in different ways. diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py index a753debf..68729c57 100644 --- a/tests/unittests/test_data.py +++ b/tests/unittests/test_data.py @@ -264,13 +264,11 @@ vendor_data: freq=PER_INSTANCE) mods = stages.Modules(initer) (_which_ran, _failures) = mods.run_section('cloud_init_modules') - cfg = mods.cfg + _cfg = mods.cfg vendor_script = initer.paths.get_ipath_cur('vendor_scripts') vendor_script_fns = "%s%s/part-001" % (new_root, vendor_script) self.assertTrue(os.path.exists(vendor_script_fns)) - - def test_merging_cloud_config(self): blob = ''' #cloud-config -- cgit v1.2.3 From 98fd17c55b637f4e1d136c954567c1d9b23e6c20 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Jan 2014 11:16:56 -0500 Subject: remove support for resizing via 'parted resizepart' This was previously broken anyway. It doesn't seem like there was an easy way to actually support it, so for now I'm removing it entirely. growpart works well enough. --- ChangeLog | 2 + cloudinit/config/cc_growpart.py | 28 +---------- doc/examples/cloud-config-growpart.txt | 4 +- .../test_handler/test_handler_growpart.py | 55 ++-------------------- 4 files changed, 9 insertions(+), 80 deletions(-) (limited to 'doc') diff --git a/ChangeLog b/ChangeLog index 1c240c68..46a27df3 100644 --- a/ChangeLog +++ b/ChangeLog @@ -11,6 +11,8 @@ unicode). - config/cloud.cfg.d/05_logging.cfg: provide a default 'output' setting, to redirect cloud-init stderr and stdout /var/log/cloud-init-output.log. + - drop support for resizing partitions with parted entirely (LP: #1212492). + This was broken as it was anyway. 
0.7.4: - fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a partitioned block device with target filesystem on ephemeral0.1. diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py index 0dd92a46..6bddf847 100644 --- a/cloudinit/config/cc_growpart.py +++ b/cloudinit/config/cc_growpart.py @@ -80,30 +80,6 @@ class ResizeFailedException(Exception): pass -class ResizeParted(object): - def available(self): - myenv = os.environ.copy() - myenv['LANG'] = 'C' - - try: - (out, _err) = util.subp(["parted", "--help"], env=myenv) - if re.search(r"COMMAND.*resizepart\s+", out, re.DOTALL): - return True - - except util.ProcessExecutionError: - pass - return False - - def resize(self, diskdev, partnum, partdev): - before = get_size(partdev) - try: - util.subp(["parted", diskdev, "resizepart", partnum]) - except util.ProcessExecutionError as e: - raise ResizeFailedException(e) - - return (before, get_size(partdev)) - - class ResizeGrowPart(object): def available(self): myenv = os.environ.copy() @@ -279,6 +255,4 @@ def handle(_name, cfg, _cloud, log, _args): else: log.debug("'%s' %s: %s" % (entry, action, msg)) -# LP: 1212444 FIXME re-order and favor ResizeParted -#RESIZERS = (('growpart', ResizeGrowPart),) -RESIZERS = (('growpart', ResizeGrowPart), ('parted', ResizeParted)) +RESIZERS = (('growpart', ResizeGrowPart),) diff --git a/doc/examples/cloud-config-growpart.txt b/doc/examples/cloud-config-growpart.txt index a459573d..393d5164 100644 --- a/doc/examples/cloud-config-growpart.txt +++ b/doc/examples/cloud-config-growpart.txt @@ -5,12 +5,10 @@ # # mode: # values: -# * auto: use any option possible (growpart or parted) +# * auto: use any option possible (any available) # if none are available, do not warn, but debug. # * growpart: use growpart to grow partitions # if growpart is not available, this is an error. -# * parted: use parted (parted resizepart) to resize partitions -# if parted is not available, this is an error. # * off, false # # devices: diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py index c0497e08..996526d3 100644 --- a/tests/unittests/test_handler/test_handler_growpart.py +++ b/tests/unittests/test_handler/test_handler_growpart.py @@ -12,50 +12,9 @@ import re import unittest # growpart: -# mode: auto # off, on, auto, 'growpart', 'parted' +# mode: auto # off, on, auto, 'growpart' # devices: ['root'] -HELP_PARTED_NO_RESIZE = """ -Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...] -Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in -interactive mode. - -OPTIONs: - - -COMMANDs: - - quit exit program - rescue START END rescue a lost partition near START - and END - resize NUMBER START END resize partition NUMBER and its file - system - rm NUMBER delete partition NUMBER - -Report bugs to bug-parted@gnu.org -""" - -HELP_PARTED_RESIZE = """ -Usage: parted [OPTION]... [DEVICE [COMMAND [PARAMETERS]...]...] -Apply COMMANDs with PARAMETERS to DEVICE. If no COMMAND(s) are given, run in -interactive mode. 
- -OPTIONs: - - -COMMANDs: - - quit exit program - rescue START END rescue a lost partition near START - and END - resize NUMBER START END resize partition NUMBER and its file - system - resizepart NUMBER END resize partition NUMBER - rm NUMBER delete partition NUMBER - -Report bugs to bug-parted@gnu.org -""" - HELP_GROWPART_RESIZE = """ growpart disk partition rewrite partition table so that partition takes up all the space it can @@ -122,11 +81,8 @@ class TestConfig(MockerTestCase): # Order must be correct self.mocker.order() - @unittest.skip("until LP: #1212444 fixed") def test_no_resizers_auto_is_fine(self): subp = self.mocker.replace(util.subp, passthrough=False) - subp(['parted', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_PARTED_NO_RESIZE, "")) subp(['growpart', '--help'], env={'LANG': 'C'}) self.mocker.result((HELP_GROWPART_NO_RESIZE, "")) self.mocker.replay() @@ -144,15 +100,14 @@ class TestConfig(MockerTestCase): self.assertRaises(ValueError, self.handle, self.name, config, self.cloud_init, self.log, self.args) - @unittest.skip("until LP: #1212444 fixed") - def test_mode_auto_prefers_parted(self): + def test_mode_auto_prefers_growpart(self): subp = self.mocker.replace(util.subp, passthrough=False) - subp(['parted', '--help'], env={'LANG': 'C'}) - self.mocker.result((HELP_PARTED_RESIZE, "")) + subp(['growpart', '--help'], env={'LANG': 'C'}) + self.mocker.result((HELP_GROWPART_RESIZE, "")) self.mocker.replay() ret = cc_growpart.resizer_factory(mode="auto") - self.assertTrue(isinstance(ret, cc_growpart.ResizeParted)) + self.assertTrue(isinstance(ret, cc_growpart.ResizeGrowPart)) def test_handle_with_no_growpart_entry(self): #if no 'growpart' entry in config, then mode=auto should be used -- cgit v1.2.3 From fd5c4c76746cf66df35d7860a6fa2f86454c7596 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Jan 2014 13:09:31 -0500 Subject: doc improvements --- doc/vendordata.txt | 108 +++++++++++++++++------------------------------------ 1 file changed, 34 insertions(+), 74 deletions(-) (limited to 'doc') diff --git a/doc/vendordata.txt b/doc/vendordata.txt index 530e3b4c..9acbe41c 100644 --- a/doc/vendordata.txt +++ b/doc/vendordata.txt @@ -1,88 +1,48 @@ === Overview === -Vendordata is data provided by the entity that launches an instance. -The cloud provider makes this data available to the instance via in one -way or another. +Vendordata is data provided by the entity that launches an instance +(for example, the cloud provider). This data can be used to +customize the image to fit into the particular environment it is +being run in. Vendordata follows the same rules as user-data, with the following -caveauts: - 1. Users have ultimate control over vendordata +caveats: + 1. Users have ultimate control over vendordata. They can disable its + execution or disable handling of specific parts of multipart input. 2. By default it only runs on first boot - 3. Vendordata runs at the users pleasure. If the use of - vendordata is required for the instance to run, then - vendordata should not be used. - 4. Most vendor operations should be done either via script, - boot_hook or upstart job. - -Vendors utilizing the vendordata channel are strongly advised to -use the #cloud-config-jsonp method, otherwise they risk that a -user can accidently override choices. - + 3. Vendordata can be disabled by the user. If the use of vendordata is + required for the instance to run, then vendordata should not be + used. + 4. user supplied cloud-config is merged over cloud-config from + vendordata. 
+ +Users providing cloud-config data can use the '#cloud-config-jsonp' method +to more finely control their modifications to the vendor supplied +cloud-config. For example, if both vendor and user have provided +'runcnmd' then the default merge handler will cause the user's runcmd to +override the one provided by the vendor. To append to 'runcmd', the user +could better provide multipart input with a cloud-config-jsonp part like: + #cloud-config-jsonp + [{ "op": "add", "path": "/runcmd", "value": ["my", "command", "here"]}] + Further, we strongly advise vendors to not 'be evil'. By evil, we mean any action that could compromise a system. Since users trust you, please take care to make sure that any vendordata is safe, atomic, idempotent and does not put your users at risk. -cloud-init can read this input and act on it in different ways. - === Input Formats === cloud-init will download and cache to filesystem any vendor-data that it -finds. However, certain types of vendor-data are handled specially. - - * Gzip Compressed Content - content found to be gzip compressed will be uncompressed, and - these rules applied to the uncompressed data - - * Mime Multi Part archive - This list of rules is applied to each part of this multi-part file - Using a mime-multi part file, the user can specify more than one - type of data. For example, both a user data script and a - cloud-config type could be specified. - - * vendor-data Script - begins with: #! or Content-Type: text/x-shellscript - script will be executed at "rc.local-like" level during first boot. - rc.local-like means "very late in the boot sequence" - - * Include File - begins with #include or Content-Type: text/x-include-url - This content is a "include" file. The file contains a list of - urls, one per line. Each of the URLs will be read, and their content - will be passed through this same set of rules. Ie, the content - read from the URL can be gzipped, mime-multi-part, or plain text - -* Include File Once - begins with #include-once or Content-Type: text/x-include-once-url - This content is a "include" file. The file contains a list of - urls, one per line. Each of the URLs will be read, and their content - will be passed through this same set of rules. Ie, the content - read from the URL can be gzipped, mime-multi-part, or plain text - This file will just be downloaded only once per instance, and its - contents cached for subsequent boots. This allows you to pass in - one-time-use or expiring URLs. - - * Cloud Config Data - begins with #cloud-config or Content-Type: text/cloud-config - - This content is "cloud-config" data. See the examples for a - commented example of supported config formats. - - * Upstart Job - begins with #upstart-job or Content-Type: text/upstart-job - - Content is placed into a file in /etc/init, and will be consumed - by upstart as any other upstart job. - - * Cloud Boothook - begins with #cloud-boothook or Content-Type: text/cloud-boothook - - This content is "boothook" data. It is stored in a file under - /var/lib/cloud and then executed immediately. - - This is the earliest "hook" available. Note, that there is no - mechanism provided for running only once. The boothook must take - care of this itself. It is provided with the instance id in the - environment variable "INSTANCE_ID". This could be made use of to - provide a 'once-per-instance' +finds. Vendordata is handled exactly like user-data. That means that +the vendor can supply multipart input and have those parts acted on +in the same way as user-data. 
+ +The only differences are: + * user-scripts are stored in a different location than user-scripts (to + avoid namespace collision) + * user can disable part handlers by cloud-config settings. + For example, to disable handling of 'part-handlers' in vendor-data, + the user could provide user-data like this: + #cloud-config + vendordata: {excluded: 'text/part-handler'} === Examples === There are examples in the examples subdirectory. -- cgit v1.2.3 From cbc5af7396743e014f1ad9ece0bb56820d26f484 Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 17 Jan 2014 17:08:58 -0500 Subject: more boto removal. move httpretty from 'Requires' the Requires would get that string rendered into the package's Depends/Requires (rather than BuildDepends/BuildRequires). We should have BuildDepends/BuildRequires too, but since trunk's package builds do not run 'make test', this isn't a big deal. This also adds 'test-requires' for httpretty. --- Requires | 3 --- cloudinit/ec2_utils.py | 3 --- doc/rtd/topics/datasources.rst | 4 ---- packages/bddeb | 1 - packages/brpm | 2 -- packages/debian/copyright | 22 ---------------------- test-requires | 1 + 7 files changed, 1 insertion(+), 35 deletions(-) create mode 100644 test-requires (limited to 'doc') diff --git a/Requires b/Requires index e847506f..8f695c68 100644 --- a/Requires +++ b/Requires @@ -31,6 +31,3 @@ requests # For patching pieces of cloud-config together jsonpatch - -# For http testing (only needed for testing) -httpretty>=0.7.1 diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py index c86623bc..92a22747 100644 --- a/cloudinit/ec2_utils.py +++ b/cloudinit/ec2_utils.py @@ -49,9 +49,6 @@ def combine_url(base, add_on): # See: http://bit.ly/TyoUQs # -# Since boto metadata reader uses the old urllib which does not -# support ssl, we need to ahead and create our own reader which -# works the same as the boto one (for now). class MetadataMaterializer(object): def __init__(self, blob, base_url, caller): self._blob = blob diff --git a/doc/rtd/topics/datasources.rst b/doc/rtd/topics/datasources.rst index 5543ed34..cc0d0ede 100644 --- a/doc/rtd/topics/datasources.rst +++ b/doc/rtd/topics/datasources.rst @@ -130,10 +130,6 @@ To see which versions are supported from your cloud provider use the following U ... latest -**Note:** internally in cloudinit the `boto`_ library used to fetch the instance -userdata and instance metadata, feel free to check that library out, it provides -many other useful EC2 functionality. - --------------------------- Config Drive --------------------------- diff --git a/packages/bddeb b/packages/bddeb index f52eb55f..9552aa40 100755 --- a/packages/bddeb +++ b/packages/bddeb @@ -29,7 +29,6 @@ import argparse # file pypi package name to a debian/ubuntu package name. 
PKG_MP = { 'argparse': 'python-argparse', - 'boto': 'python-boto', 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', 'jsonpatch': 'python-jsonpatch | python-json-patch', diff --git a/packages/brpm b/packages/brpm index 8c90a0ab..f8ba1db1 100755 --- a/packages/brpm +++ b/packages/brpm @@ -36,7 +36,6 @@ from cloudinit import util PKG_MP = { 'redhat': { 'argparse': 'python-argparse', - 'boto': 'python-boto', 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', 'jsonpatch': 'python-jsonpatch', @@ -48,7 +47,6 @@ PKG_MP = { }, 'suse': { 'argparse': 'python-argparse', - 'boto': 'python-boto', 'cheetah': 'python-cheetah', 'configobj': 'python-configobj', 'jsonpatch': 'python-jsonpatch', diff --git a/packages/debian/copyright b/packages/debian/copyright index dc993525..f55bb7a3 100644 --- a/packages/debian/copyright +++ b/packages/debian/copyright @@ -27,25 +27,3 @@ License: GPL-3 The complete text of the GPL version 3 can be seen in /usr/share/common-licenses/GPL-3. - -Files: cloudinit/boto_utils.py -Copyright: 2006,2007, Mitch Garnaat http://garnaat.org/ -License: MIT - Permission is hereby granted, free of charge, to any person obtaining a - copy of this software and associated documentation files (the - "Software"), to deal in the Software without restriction, including - without limitation the rights to use, copy, modify, merge, publish, dis- - tribute, sublicense, and/or sell copies of the Software, and to permit - persons to whom the Software is furnished to do so, subject to the fol- - lowing conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- - ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT - SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, - WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - IN THE SOFTWARE. diff --git a/test-requires b/test-requires new file mode 100644 index 00000000..6cee1c44 --- /dev/null +++ b/test-requires @@ -0,0 +1 @@ +httpretty>=0.7.1 -- cgit v1.2.3 From 50076145457e5857896493cb5c063944da03772c Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Tue, 21 Jan 2014 16:49:44 -0500 Subject: cloud-config-landscape.txt: improve documentation --- doc/examples/cloud-config-landscape.txt | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'doc') diff --git a/doc/examples/cloud-config-landscape.txt b/doc/examples/cloud-config-landscape.txt index e4d23cc9..74e07b62 100644 --- a/doc/examples/cloud-config-landscape.txt +++ b/doc/examples/cloud-config-landscape.txt @@ -6,6 +6,9 @@ # # Note: 'tags' should be specified as a comma delimited string # rather than a list. 
+# +# You can get example key/values by running 'landscape-config', +# answer question, then look at /etc/landscape/client.config landscape: client: url: "https://landscape.canonical.com/message-system" @@ -13,3 +16,7 @@ landscape: data_path: "/var/lib/landscape/client" http_proxy: "http://my.proxy.com/foobar" tags: "server,cloud" + computer_title = footitle + https_proxy = fooproxy + registration_key = fookey + account_name = fooaccount -- cgit v1.2.3 From fb55c1079375454d2a2a2f82c6c1812759eeb1f1 Mon Sep 17 00:00:00 2001 From: Ben Howard Date: Fri, 24 Jan 2014 12:29:04 -0700 Subject: Fixes for SmartOS datasource (LP: #1272115): 1. fixed conflation of user-data and cloud-init user-data. Cloud-init user-data is now namespaced as 'cloud-init:user-data'. 2. user-scripts are now fetched from the meta-data service each boot and executed as in the scripts directory 3. datacenter name is now namespaced as sdc:datacenter 4. user-scripts should be shebanged if there is no file magic --- cloudinit/sources/DataSourceSmartOS.py | 45 +++++++- cloudinit/util.py | 72 ++++++++++++ doc/sources/smartos/README.rst | 92 ++++++++++++--- tests/unittests/test_datasource/test_smartos.py | 145 ++++++++++++++++++++++-- 4 files changed, 322 insertions(+), 32 deletions(-) (limited to 'doc') diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py index 6593ce6e..6bd4a5c7 100644 --- a/cloudinit/sources/DataSourceSmartOS.py +++ b/cloudinit/sources/DataSourceSmartOS.py @@ -25,7 +25,9 @@ # requests on the console. For example, to get the hostname, you # would send "GET hostname" on /dev/ttyS1. # - +# Certain behavior is defined by the DataDictionary +# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html +# Comments with "@datadictionary" are snippets of the definition import base64 from cloudinit import log as logging @@ -43,10 +45,11 @@ SMARTOS_ATTRIB_MAP = { 'local-hostname': ('hostname', True), 'public-keys': ('root_authorized_keys', True), 'user-script': ('user-script', False), - 'user-data': ('user-data', False), + 'legacy-user-data': ('user-data', False), + 'user-data': ('cloud-init:user-data', False), 'iptables_disable': ('iptables_disable', True), 'motd_sys_info': ('motd_sys_info', True), - 'availability_zone': ('datacenter_name', True), + 'availability_zone': ('sdc:datacenter_name', True), 'vendordata': ('sdc:operator-script', False), } @@ -71,7 +74,11 @@ BUILTIN_DS_CONFIG = { 'seed_timeout': 60, 'no_base64_decode': ['root_authorized_keys', 'motd_sys_info', - 'iptables_disable'], + 'iptables_disable', + 'user-data', + 'user-script', + 'sdc:datacenter_name', + ], 'base64_keys': [], 'base64_all': False, 'disk_aliases': {'ephemeral0': '/dev/vdb'}, @@ -88,6 +95,11 @@ BUILTIN_CLOUD_CONFIG = { 'device': 'ephemeral0'}], } +# @datadictionary: this is legacy path for placing files from metadata +# per the SmartOS location. 
It is not preferable, but is done for +# legacy reasons +LEGACY_USER_D = "/var/db" + class DataSourceSmartOS(sources.DataSource): def __init__(self, sys_cfg, distro, paths): @@ -107,6 +119,9 @@ class DataSourceSmartOS(sources.DataSource): self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode') self.b64_keys = self.ds_cfg.get('base64_keys') self.b64_all = self.ds_cfg.get('base64_all') + self.script_base_d = os.path.join(self.paths.get_cpath("scripts")) + self.user_script_d = os.path.join(self.paths.get_cpath("scripts"), + 'per-boot') def __str__(self): root = sources.DataSource.__str__(self) @@ -144,14 +159,32 @@ class DataSourceSmartOS(sources.DataSource): smartos_noun, strip = attribute md[ci_noun] = self.query(smartos_noun, strip=strip) + # @datadictionary: This key has no defined format, but its value + # is written to the file /var/db/mdata-user-data on each boot prior + # to the phase that runs user-script. This file is not to be executed. + # This allows a configuration file of some kind to be injected into + # the machine to be consumed by the user-script when it runs. + u_script = md.get('user-script') + u_script_f = "%s/99_user_script" % self.user_script_d + u_script_l = "%s/user-script" % LEGACY_USER_D + util.write_content(u_script, u_script_f, link=u_script_l, + executable=True) + + # @datadictionary: This key may contain a program that is written + # to a file in the filesystem of the guest on each boot and then + # executed. It may be of any format that would be considered + # executable in the guest instance. + u_data = md.get('legacy-user-data') + u_data_f = "%s/mdata-user-data" % LEGACY_USER_D + util.write_content(u_data, u_data_f) + + # Handle the cloud-init regular meta if not md['local-hostname']: md['local-hostname'] = system_uuid ud = None if md['user-data']: ud = md['user-data'] - elif md['user-script']: - ud = md['user-script'] self.metadata = util.mergemanydict([md, self.metadata]) self.userdata_raw = ud diff --git a/cloudinit/util.py b/cloudinit/util.py index 77f9ab36..5f64cb69 100644 --- a/cloudinit/util.py +++ b/cloudinit/util.py @@ -1904,3 +1904,75 @@ def expand_dotted_devname(dotted): return toks else: return (dotted, None) + + +def write_executable_content(script, script_f): + """ + This writes executable content and ensures that the shebang + exists. + """ + write_file(script_f, script, mode=0700) + try: + cmd = ["file", "--brief", "--mime-type", script_f] + (f_type, _err) = subp(cmd) + + LOG.debug("script %s mime type is %s" % (script_f, f_type)) + + # if the magic is text/plain, re-write with the shebang + if f_type.strip() == "text/plain": + with open(script_f, 'w') as f: + f.write("#!/bin/bash\n") + f.write(script) + LOG.debug("added shebang to file %s" % script_f) + + except ProcessExecutionError as e: + logexc(LOG, "Failed to identify script type for %s" % script_f, e) + return False + + except IOError as e: + logexc(LOG, "Failed to add shebang to file %s" % script_f, e) + return False + + return True + + +def write_content(content, content_f, link=None, + executable=False): + """ + Write the content to content_f. Under the following rules: + 1. Backup previous content_f + 2. Write the contente + 3. If no content, remove the file + 4. 
If there is a link, create it + + @param content: what to write + @param content_f: the file name + @param backup_d: the directory to save the backup at + @param link: if defined, location to create a symlink to + @param executable: is the file executable + """ + + if content: + if not executable: + write_file(content_f, content, mode=0400) + else: + w = write_executable_content(content, content_f) + if not w: + LOG.debug("failed to write file to %s" % content_f) + return False + + if not content and os.path.exists(content_f): + os.unlink(content_f) + + if link: + try: + if os.path.islink(link): + os.unlink(link) + if content and os.path.exists(content_f): + ensure_dir(os.path.dirname(link)) + os.symlink(content_f, link) + except IOError as e: + logexc(LOG, "failed establishing content link", e) + return False + + return True diff --git a/doc/sources/smartos/README.rst b/doc/sources/smartos/README.rst index 8b63e520..e63f311f 100644 --- a/doc/sources/smartos/README.rst +++ b/doc/sources/smartos/README.rst @@ -16,11 +16,35 @@ responds with the status and if "SUCCESS" returns until a single ".\n". New versions of the SmartOS tooling will include support for base64 encoded data. -Userdata --------- - -In SmartOS parlance, user-data is a actually meta-data. This userdata can be -provided as key-value pairs. +Meta-data channels +------------------ + +Cloud-init supports three modes of delivering user/meta-data via the flexible +channels of SmartOS. + +* user-data is written to /var/db/user-data + - per the spec, user-data is for consumption by the end-user, not provisioning + tools + - cloud-init entirely ignores this channel other than writting it to disk + - removal of the meta-data key means that /var/db/user-data gets removed + - a backup of previous meta-data is maintained as /var/db/user-data. + - is the epoch time when cloud-init ran + +* user-script is written to /var/lib/cloud/scripts/per-boot/99_user_data + - this is executed each boot + - a link is created to /var/db/user-script + - previous versions of the user-script is written to + /var/lib/cloud/scripts/per-boot.backup/99_user_script.. + - is the epoch time when cloud-init ran. + - when the 'user-script' meta-data key goes missing, the user-script is + removed from the file system, although a backup is maintained. + - if the script is not shebanged (i.e. starts with #!), then + or is not an executable, cloud-init will add a shebang of "#!/bin/bash" + +* cloud-init:user-data is treated like on other Clouds. + - this channel is used for delivering _all_ cloud-init instructions + - scripts delivered over this channel must be well formed (i.e. must have + a shebang) Cloud-init supports reading the traditional meta-data fields supported by the SmartOS tools. These are: @@ -32,19 +56,49 @@ SmartOS tools. These are: Note: At this time iptables_disable and enable_motd_sys_info are read but are not actioned. -user-script ------------ - -SmartOS traditionally supports sending over a user-script for execution at the -rc.local level. Cloud-init supports running user-scripts as if they were -cloud-init user-data. In this sense, anything with a shell interpreter -directive will run. - -user-data and user-script -------------------------- - -In the event that a user defines the meta-data key of "user-data" it will -always supersede any user-script data. This is for consistency. +disabling user-script +--------------------- + +Cloud-init uses the per-boot script functionality to handle the execution +of the user-script. 
If you want to prevent this use a cloud-config of: + +#cloud-config +cloud_final_modules: + - scripts-per-once + - scripts-per-instance + - scripts-user + - ssh-authkey-fingerprints + - keys-to-console + - phone-home + - final-message + - power-state-change + +Alternatively you can use the json patch method +#cloud-config-jsonp +[ + { "op": "replace", + "path": "/cloud_final_modules", + "value": ["scripts-per-once", + "scripts-per-instance", + "scripts-user", + "ssh-authkey-fingerprints", + "keys-to-console", + "phone-home", + "final-message", + "power-state-change"] + } +] + +The default cloud-config includes "script-per-boot". Cloud-init will still +ingest and write the user-data but will not execute it, when you disable +the per-boot script handling. + +Note: Unless you have an explicit use-case, it is recommended that you not + disable the per-boot script execution, especially if you are using + any of the life-cycle management features of SmartOS. + +The cloud-config needs to be delivered over the cloud-init:user-data channel +in order for cloud-init to ingest it. base64 ------ @@ -54,6 +108,8 @@ are provided by SmartOS: * root_authorized_keys * enable_motd_sys_info * iptables_disable + * user-data + * user-script This list can be changed through system config of variable 'no_base64_decode'. diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py index 956767d8..ae427bb5 100644 --- a/tests/unittests/test_datasource/test_smartos.py +++ b/tests/unittests/test_datasource/test_smartos.py @@ -27,6 +27,10 @@ from cloudinit import helpers from cloudinit.sources import DataSourceSmartOS from mocker import MockerTestCase +import os +import os.path +import re +import stat import uuid MOCK_RETURNS = { @@ -35,7 +39,11 @@ MOCK_RETURNS = { 'disable_iptables_flag': None, 'enable_motd_sys_info': None, 'test-var1': 'some data', - 'user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), + 'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']), + 'sdc:datacenter_name': 'somewhere2', + 'sdc:operator-script': '\n'.join(['bin/true', '']), + 'user-data': '\n'.join(['something', '']), + 'user-script': '\n'.join(['/bin/true', '']), } DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc') @@ -101,6 +109,7 @@ class TestSmartOSDataSource(MockerTestCase): def setUp(self): # makeDir comes from MockerTestCase self.tmp = self.makeDir() + self.legacy_user_d = self.makeDir() # patch cloud_dir, so our 'seed_dir' is guaranteed empty self.paths = helpers.Paths({'cloud_dir': self.tmp}) @@ -138,6 +147,7 @@ class TestSmartOSDataSource(MockerTestCase): sys_cfg['datasource'] = sys_cfg.get('datasource', {}) sys_cfg['datasource']['SmartOS'] = ds_cfg + self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)]) self.apply_patches([(mod, 'get_serial', _get_serial)]) self.apply_patches([(mod, 'dmi_data', _dmi_data)]) dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None, @@ -194,7 +204,7 @@ class TestSmartOSDataSource(MockerTestCase): # metadata provided base64_all of true my_returns = MOCK_RETURNS.copy() my_returns['base64_all'] = "true" - for k in ('hostname', 'user-data'): + for k in ('hostname', 'cloud-init:user-data'): my_returns[k] = base64.b64encode(my_returns[k]) dsrc = self._get_ds(mockdata=my_returns) @@ -202,7 +212,7 @@ class TestSmartOSDataSource(MockerTestCase): self.assertTrue(ret) self.assertEquals(MOCK_RETURNS['hostname'], dsrc.metadata['local-hostname']) - self.assertEquals(MOCK_RETURNS['user-data'], + 
self.assertEquals(MOCK_RETURNS['cloud-init:user-data'], dsrc.userdata_raw) self.assertEquals(MOCK_RETURNS['root_authorized_keys'], dsrc.metadata['public-keys']) @@ -213,9 +223,9 @@ class TestSmartOSDataSource(MockerTestCase): def test_b64_userdata(self): my_returns = MOCK_RETURNS.copy() - my_returns['b64-user-data'] = "true" + my_returns['b64-cloud-init:user-data'] = "true" my_returns['b64-hostname'] = "true" - for k in ('hostname', 'user-data'): + for k in ('hostname', 'cloud-init:user-data'): my_returns[k] = base64.b64encode(my_returns[k]) dsrc = self._get_ds(mockdata=my_returns) @@ -223,7 +233,8 @@ class TestSmartOSDataSource(MockerTestCase): self.assertTrue(ret) self.assertEquals(MOCK_RETURNS['hostname'], dsrc.metadata['local-hostname']) - self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) + self.assertEquals(MOCK_RETURNS['cloud-init:user-data'], + dsrc.userdata_raw) self.assertEquals(MOCK_RETURNS['root_authorized_keys'], dsrc.metadata['public-keys']) @@ -238,13 +249,131 @@ class TestSmartOSDataSource(MockerTestCase): self.assertTrue(ret) self.assertEquals(MOCK_RETURNS['hostname'], dsrc.metadata['local-hostname']) - self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) + self.assertEquals(MOCK_RETURNS['cloud-init:user-data'], + dsrc.userdata_raw) def test_userdata(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) ret = dsrc.get_data() self.assertTrue(ret) - self.assertEquals(MOCK_RETURNS['user-data'], dsrc.userdata_raw) + self.assertEquals(MOCK_RETURNS['user-data'], + dsrc.metadata['legacy-user-data']) + self.assertEquals(MOCK_RETURNS['cloud-init:user-data'], + dsrc.userdata_raw) + + def test_sdc_scripts(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['user-script'], + dsrc.metadata['user-script']) + + legacy_script_f = "%s/user-script" % self.legacy_user_d + self.assertTrue(os.path.exists(legacy_script_f)) + self.assertTrue(os.path.islink(legacy_script_f)) + user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:] + self.assertEquals(user_script_perm, '700') + + def test_scripts_shebanged(self): + dsrc = self._get_ds(mockdata=MOCK_RETURNS) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(MOCK_RETURNS['user-script'], + dsrc.metadata['user-script']) + + legacy_script_f = "%s/user-script" % self.legacy_user_d + self.assertTrue(os.path.exists(legacy_script_f)) + self.assertTrue(os.path.islink(legacy_script_f)) + shebang = None + with open(legacy_script_f, 'r') as f: + shebang = f.readlines()[0].strip() + self.assertEquals(shebang, "#!/bin/bash") + user_script_perm = oct(os.stat(legacy_script_f)[stat.ST_MODE])[-3:] + self.assertEquals(user_script_perm, '700') + + def test_scripts_shebang_not_added(self): + """ + Test that the SmartOS requirement that plain text scripts + are executable. This test makes sure that plain texts scripts + with out file magic have it added appropriately by cloud-init. 
+ """ + + my_returns = MOCK_RETURNS.copy() + my_returns['user-script'] = '\n'.join(['#!/usr/bin/perl', + 'print("hi")', '']) + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertEquals(my_returns['user-script'], + dsrc.metadata['user-script']) + + legacy_script_f = "%s/user-script" % self.legacy_user_d + self.assertTrue(os.path.exists(legacy_script_f)) + self.assertTrue(os.path.islink(legacy_script_f)) + shebang = None + with open(legacy_script_f, 'r') as f: + shebang = f.readlines()[0].strip() + self.assertEquals(shebang, "#!/usr/bin/perl") + + def test_scripts_removed(self): + """ + Since SmartOS requires that the user script is fetched + each boot, we want to make sure that the information + is backed-up for user-review later. + + This tests the behavior of when a script is removed. It makes + sure that a) the previous script is backed-up; and 2) that + there is no script remaining. + """ + + script_d = os.path.join(self.tmp, "scripts", "per-boot") + os.makedirs(script_d) + + test_script_f = "%s/99_user_script" % script_d + with open(test_script_f, 'w') as f: + f.write("TEST DATA") + + my_returns = MOCK_RETURNS.copy() + del my_returns['user-script'] + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertFalse(dsrc.metadata['user-script']) + self.assertFalse(os.path.exists(test_script_f)) + + def test_userdata_removed(self): + """ + User-data in the SmartOS world is supposed to be written to a file + each and every boot. This tests to make sure that in the event the + legacy user-data is removed, the existing user-data is backed-up and + there is no /var/db/user-data left. + """ + + user_data_f = "%s/mdata-user-data" % self.legacy_user_d + with open(user_data_f, 'w') as f: + f.write("PREVIOUS") + + my_returns = MOCK_RETURNS.copy() + del my_returns['user-data'] + + dsrc = self._get_ds(mockdata=my_returns) + ret = dsrc.get_data() + self.assertTrue(ret) + self.assertFalse(dsrc.metadata.get('legacy-user-data')) + + found_new = False + for root, _dirs, files in os.walk(self.legacy_user_d): + for name in files: + name_f = os.path.join(root, name) + permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:] + if re.match(r'.*\/mdata-user-data$', name_f): + found_new = True + print name_f + self.assertEquals(permissions, '400') + + self.assertFalse(found_new) def test_disable_iptables_flag(self): dsrc = self._get_ds(mockdata=MOCK_RETURNS) -- cgit v1.2.3 From c92cd051a1d598f83de03c4135c800b17fd46a9a Mon Sep 17 00:00:00 2001 From: Scott Moser Date: Fri, 24 Jan 2014 14:47:28 -0500 Subject: pep8/pylint fixes tools/run-pep8 wasn't checking all python files. tools/run-pylint wasnt checking bin/cloud-init fixed resultant pep8 issues after finding them. 
--- cloudinit/distros/freebsd.py | 2 +- cloudinit/distros/net_util.py | 18 +++++++++--------- doc/rtd/conf.py | 3 ++- setup.py | 6 +++--- tools/run-pep8 | 11 +---------- tools/run-pylint | 2 +- 6 files changed, 17 insertions(+), 25 deletions(-) (limited to 'doc') diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py index f1650a77..d28860eb 100644 --- a/cloudinit/distros/freebsd.py +++ b/cloudinit/distros/freebsd.py @@ -2,7 +2,7 @@ # # Copyright (C) 2014 Harm Weites # -# Author: Harm Weites +# Author: Harm Weites # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3, as diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py index 5f60666d..b9bcfd8b 100644 --- a/cloudinit/distros/net_util.py +++ b/cloudinit/distros/net_util.py @@ -51,7 +51,7 @@ # # auto lo # iface lo inet loopback -# +# # auto eth0 # iface eth0 inet static # address 10.0.0.1 @@ -64,17 +64,17 @@ # { # "lo": { # "auto": true -# }, +# }, # "eth0": { -# "auto": true, +# "auto": true, # "dns-nameservers": [ -# "98.0.0.1", +# "98.0.0.1", # "98.0.0.2" -# ], -# "broadcast": "10.0.0.255", -# "netmask": "255.255.252.0", -# "bootproto": "static", -# "address": "10.0.0.1", +# ], +# "broadcast": "10.0.0.255", +# "netmask": "255.255.252.0", +# "bootproto": "static", +# "address": "10.0.0.1", # "gateway": "10.0.0.2" # } # } diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py index c9ae79f4..52a8f92b 100644 --- a/doc/rtd/conf.py +++ b/doc/rtd/conf.py @@ -1,4 +1,5 @@ -import sys, os +import os +import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the diff --git a/setup.py b/setup.py index 8d18b97e..9118e5f6 100755 --- a/setup.py +++ b/setup.py @@ -63,7 +63,7 @@ def tiny_p(cmd, capture=True): (out, err) = sp.communicate() ret = sp.returncode # pylint: disable=E1101 if ret not in [0]: - raise RuntimeError("Failed running %s [rc=%s] (%s, %s)" + raise RuntimeError("Failed running %s [rc=%s] (%s, %s)" % (cmd, ret, out, err)) return (out, err) @@ -102,7 +102,7 @@ class InitsysInstallData(install): " specifying a init system!") % (", ".join(INITSYS_TYPES))) elif self.init_system: self.distribution.data_files.append( - (INITSYS_ROOTS[self.init_system], + (INITSYS_ROOTS[self.init_system], INITSYS_FILES[self.init_system])) # Force that command to reinitalize (with new file list) self.distribution.reinitialize_command('install_data', True) @@ -134,7 +134,7 @@ setuptools.setup(name='cloud-init', [f for f in glob('doc/examples/seed/*') if is_f(f)]), ], install_requires=read_requires(), - cmdclass = { + cmdclass={ # Use a subclass for install that handles # adding on the right init system configuration files 'install': InitsysInstallData, diff --git a/tools/run-pep8 b/tools/run-pep8 index 20e594bc..cfce5edd 100755 --- a/tools/run-pep8 +++ b/tools/run-pep8 @@ -1,15 +1,7 @@ #!/bin/bash -ci_files='cloudinit/*.py cloudinit/config/*.py' -test_files=$(find tests -name "*.py") -def_files="$ci_files $test_files" - if [ $# -eq 0 ]; then - files=( ) - for f in $def_files; do - [ -f "$f" ] || { echo "failed, $f not a file" 1>&2; exit 1; } - files[${#files[@]}]=${f} - done + files=( bin/cloud-init $(find * -name "*.py" -type f) ) else files=( "$@" ); fi @@ -44,4 +36,3 @@ cmd=( echo -e "\nRunning 'cloudinit' pep8:" echo "${cmd[@]}" "${cmd[@]}" - diff --git a/tools/run-pylint b/tools/run-pylint index b74efda9..0b7c16d4 100755 --- 
a/tools/run-pylint +++ b/tools/run-pylint @@ -1,7 +1,7 @@ #!/bin/bash if [ $# -eq 0 ]; then - files=( $(find * -name "*.py" -type f) ) + files=( bin/cloud-init $(find * -name "*.py" -type f) ) else files=( "$@" ); fi -- cgit v1.2.3
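With this change both helper scripts accept an explicit list of files and otherwise fall back to the default list shown above (bin/cloud-init plus every *.py file found in the tree). Assuming they are run from the top of the source tree, typical invocations would look something like:

    ./tools/run-pep8                      # check the default file list
    ./tools/run-pylint
    ./tools/run-pep8 cloudinit/stages.py  # or check only the named files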