summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--ChangeLog193
-rw-r--r--Makefile27
-rw-r--r--Requires30
-rw-r--r--TODO31
-rwxr-xr-xbin/cloud-init474
-rwxr-xr-xcloud-init-cfg.py115
-rwxr-xr-xcloud-init-query.py56
-rwxr-xr-xcloud-init.py229
-rw-r--r--cloudinit/CloudConfig/__init__.py274
-rw-r--r--cloudinit/CloudConfig/cc_apt_pipelining.py53
-rw-r--r--cloudinit/CloudConfig/cc_apt_update_upgrade.py241
-rw-r--r--cloudinit/CloudConfig/cc_bootcmd.py48
-rw-r--r--cloudinit/CloudConfig/cc_chef.py119
-rw-r--r--cloudinit/CloudConfig/cc_final_message.py58
-rw-r--r--cloudinit/CloudConfig/cc_keys_to_console.py42
-rw-r--r--cloudinit/CloudConfig/cc_locale.py54
-rw-r--r--cloudinit/CloudConfig/cc_mcollective.py99
-rw-r--r--cloudinit/CloudConfig/cc_puppet.py108
-rw-r--r--cloudinit/CloudConfig/cc_resizefs.py108
-rw-r--r--cloudinit/CloudConfig/cc_rightscale_userdata.py78
-rw-r--r--cloudinit/CloudConfig/cc_ssh.py106
-rw-r--r--cloudinit/CloudConfig/cc_timezone.py67
-rw-r--r--cloudinit/CloudConfig/cc_update_etc_hosts.py87
-rw-r--r--cloudinit/CloudConfig/cc_update_hostname.py101
-rw-r--r--cloudinit/DataSource.py214
-rw-r--r--cloudinit/DataSourceCloudStack.py92
-rw-r--r--cloudinit/DataSourceConfigDrive.py231
-rw-r--r--cloudinit/DataSourceEc2.py217
-rw-r--r--cloudinit/DataSourceMAAS.py345
-rw-r--r--cloudinit/SshUtil.py227
-rw-r--r--cloudinit/UserDataHandler.py262
-rw-r--r--cloudinit/__init__.py654
-rw-r--r--cloudinit/cloud.py101
-rw-r--r--cloudinit/config/__init__.py56
-rw-r--r--cloudinit/config/cc_apt_pipelining.py59
-rw-r--r--cloudinit/config/cc_apt_update_upgrade.py272
-rw-r--r--cloudinit/config/cc_bootcmd.py55
-rw-r--r--cloudinit/config/cc_byobu.py (renamed from cloudinit/CloudConfig/cc_byobu.py)22
-rw-r--r--cloudinit/config/cc_ca_certs.py (renamed from cloudinit/CloudConfig/cc_ca_certs.py)47
-rw-r--r--cloudinit/config/cc_chef.py129
-rw-r--r--cloudinit/config/cc_disable_ec2_metadata.py36
-rw-r--r--cloudinit/config/cc_final_message.py68
-rw-r--r--cloudinit/config/cc_foo.py52
-rw-r--r--cloudinit/config/cc_grub_dpkg.py (renamed from cloudinit/CloudConfig/cc_grub_dpkg.py)27
-rw-r--r--cloudinit/config/cc_keys_to_console.py53
-rw-r--r--cloudinit/config/cc_landscape.py (renamed from cloudinit/CloudConfig/cc_landscape.py)50
-rw-r--r--cloudinit/config/cc_locale.py37
-rw-r--r--cloudinit/config/cc_mcollective.py91
-rw-r--r--cloudinit/config/cc_mounts.py (renamed from cloudinit/CloudConfig/cc_mounts.py)93
-rw-r--r--cloudinit/config/cc_phone_home.py (renamed from cloudinit/CloudConfig/cc_phone_home.py)84
-rw-r--r--cloudinit/config/cc_puppet.py113
-rw-r--r--cloudinit/config/cc_resizefs.py140
-rw-r--r--cloudinit/config/cc_rightscale_userdata.py102
-rw-r--r--cloudinit/config/cc_rsyslog.py (renamed from cloudinit/CloudConfig/cc_rsyslog.py)63
-rw-r--r--cloudinit/config/cc_runcmd.py (renamed from cloudinit/CloudConfig/cc_runcmd.py)18
-rw-r--r--cloudinit/config/cc_salt_minion.py (renamed from cloudinit/CloudConfig/cc_salt_minion.py)52
-rw-r--r--cloudinit/config/cc_scripts_per_boot.py (renamed from cloudinit/CloudConfig/cc_scripts_per_boot.py)21
-rw-r--r--cloudinit/config/cc_scripts_per_instance.py (renamed from cloudinit/CloudConfig/cc_scripts_per_instance.py)21
-rw-r--r--cloudinit/config/cc_scripts_per_once.py (renamed from cloudinit/CloudConfig/cc_scripts_per_once.py)21
-rw-r--r--cloudinit/config/cc_scripts_user.py (renamed from cloudinit/CloudConfig/cc_scripts_user.py)22
-rw-r--r--cloudinit/config/cc_set_hostname.py (renamed from cloudinit/CloudConfig/cc_set_hostname.py)23
-rw-r--r--cloudinit/config/cc_set_passwords.py (renamed from cloudinit/CloudConfig/cc_set_passwords.py)105
-rw-r--r--cloudinit/config/cc_ssh.py132
-rw-r--r--cloudinit/config/cc_ssh_import_id.py (renamed from cloudinit/CloudConfig/cc_ssh_import_id.py)31
-rw-r--r--cloudinit/config/cc_timezone.py39
-rw-r--r--cloudinit/config/cc_update_etc_hosts.py60
-rw-r--r--cloudinit/config/cc_update_hostname.py41
-rw-r--r--cloudinit/distros/__init__.py163
-rw-r--r--cloudinit/distros/debian.py149
-rw-r--r--cloudinit/distros/fedora.py (renamed from cloudinit/CloudConfig/cc_foo.py)16
-rw-r--r--cloudinit/distros/rhel.py337
-rw-r--r--cloudinit/distros/ubuntu.py (renamed from cloudinit/CloudConfig/cc_disable_ec2_metadata.py)19
-rw-r--r--cloudinit/handlers/__init__.py222
-rw-r--r--cloudinit/handlers/boot_hook.py73
-rw-r--r--cloudinit/handlers/cloud_config.py62
-rw-r--r--cloudinit/handlers/shell_script.py52
-rw-r--r--cloudinit/handlers/upstart_job.py66
-rw-r--r--cloudinit/helpers.py452
-rw-r--r--cloudinit/importer.py65
-rw-r--r--cloudinit/log.py133
-rw-r--r--cloudinit/netinfo.py103
-rw-r--r--cloudinit/settings.py57
-rw-r--r--cloudinit/sources/DataSourceCloudStack.py147
-rw-r--r--cloudinit/sources/DataSourceConfigDrive.py226
-rw-r--r--cloudinit/sources/DataSourceEc2.py265
-rw-r--r--cloudinit/sources/DataSourceMAAS.py264
-rw-r--r--cloudinit/sources/DataSourceNoCloud.py (renamed from cloudinit/DataSourceNoCloud.py)142
-rw-r--r--cloudinit/sources/DataSourceOVF.py (renamed from cloudinit/DataSourceOVF.py)237
-rw-r--r--cloudinit/sources/__init__.py223
-rw-r--r--cloudinit/ssh_util.py314
-rw-r--r--cloudinit/stages.py551
-rw-r--r--cloudinit/templater.py41
-rw-r--r--cloudinit/url_helper.py226
-rw-r--r--cloudinit/user_data.py243
-rw-r--r--cloudinit/util.py1636
-rw-r--r--cloudinit/version.py27
-rw-r--r--config/cloud.cfg38
-rw-r--r--config/cloud.cfg.d/05_logging.cfg6
-rwxr-xr-xdebian.trunk/rules29
-rwxr-xr-xinstall.sh31
-rwxr-xr-xpackages/bddeb172
-rwxr-xr-xpackages/brpm216
-rw-r--r--packages/debian/changelog (renamed from debian.trunk/changelog)2
-rw-r--r--packages/debian/compat (renamed from debian.trunk/compat)0
-rw-r--r--packages/debian/control (renamed from debian.trunk/control)10
-rw-r--r--packages/debian/copyright (renamed from debian.trunk/copyright)0
-rw-r--r--packages/debian/dirs (renamed from debian.trunk/dirs)0
-rw-r--r--packages/debian/pycompat (renamed from debian.trunk/pycompat)0
-rwxr-xr-xpackages/debian/rules17
-rwxr-xr-xpackages/make-dist-tarball (renamed from tools/make-dist-tarball)4
-rwxr-xr-xpackages/make-tarball89
-rw-r--r--packages/redhat/cloud-init.spec183
-rwxr-xr-xsetup.py117
-rwxr-xr-xsysvinit/cloud-config124
-rwxr-xr-xsysvinit/cloud-final124
-rwxr-xr-xsysvinit/cloud-init124
-rwxr-xr-xsysvinit/cloud-init-local124
-rw-r--r--templates/chef_client.rb.tmpl8
-rw-r--r--templates/default-locale.tmpl1
-rw-r--r--templates/hosts.redhat.tmpl22
-rw-r--r--templates/hosts.ubuntu.tmpl (renamed from templates/hosts.tmpl)15
-rw-r--r--templates/sources.list.tmpl101
-rw-r--r--tests/configs/sample1.yaml53
-rw-r--r--tests/unittests/test__init__.py162
-rw-r--r--tests/unittests/test_builtin_handlers.py54
-rw-r--r--tests/unittests/test_datasource/test_maas.py64
-rw-r--r--tests/unittests/test_handler/test_handler_ca_certs.py103
-rw-r--r--tests/unittests/test_userdata.py135
-rw-r--r--tests/unittests/test_util.py123
-rwxr-xr-xtools/bddeb33
-rwxr-xr-xtools/hacking.py175
-rwxr-xr-xtools/mock-meta.py444
-rwxr-xr-xtools/read-dependencies45
-rwxr-xr-xtools/read-version70
-rwxr-xr-xtools/run-pep835
-rwxr-xr-xtools/run-pylint13
-rw-r--r--upstart/cloud-config.conf2
-rw-r--r--upstart/cloud-final.conf2
-rw-r--r--upstart/cloud-init-local.conf2
-rw-r--r--upstart/cloud-init.conf2
140 files changed, 11102 insertions, 5859 deletions
diff --git a/ChangeLog b/ChangeLog
index 1f1160d5..c3f71b9c 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,196 @@
+0.7.0:
+ - unified binary that activates the various stages
+ - Now using argparse + subcommands to specify the various CLI options
+ - a stage module that clearly separates the stages of the different
+ components (also described how they are used and in what order in the
+ new unified binary)
+ - user_data is now a module that just does user data processing while the
+ actual activation and 'handling' of the processed user data is done via
+ a separate set of files (and modules) with the main 'init' stage being the
+ controller of this
+ - creation of boot_hook, cloud_config, shell_script, upstart_job version 2
+   modules (with classes that perform their functionality) instead of those
+ having functionality that is attached to the cloudinit object (which
+ reduces reuse and limits future functionality, and makes testing harder)
+ - removal of global config that defined paths, shared config, now this is
+   via objects making unit testing and global side-effects a non-issue
+ - creation of a 'helpers.py'
+ - this contains an abstraction for the 'lock' like objects that the various
+ module/handler running stages use to avoid re-running a given
+ module/handler for a given frequency. this makes it separated from
+ the actual usage of that object (thus helpful for testing and clear lines
+ usage and how the actual job is accomplished)
+ - a common 'runner' class is the main entrypoint using these locks to
+     run function objects passed in (along with their arguments) and their
+ frequency
+ - add in a 'paths' object that provides access to the previously global
+ and/or config based paths (thus providing a single entrypoint object/type
+ that provides path information)
+ - this also adds in the ability to change the path when constructing
+ that path 'object' and adding in additional config that can be used to
+ alter the root paths of 'joins' (useful for testing or possibly useful
+ in chroots?)
+  - config options now available that can alter the 'write_root' and the
+ 'read_root' when backing code uses the paths join() function
+ - add a config parser subclass that will automatically add unknown sections
+ and return default values (instead of throwing exceptions for these cases)
+ - a new config merging class that will be the central object that knows
+ how to do the common configuration merging from the various configuration
+ sources. The order is the following:
+ - cli config files override environment config files
+ which override instance configs which override datasource
+ configs which override base configuration which overrides
+ default configuration.
+ - remove the passing around of the 'cloudinit' object as a 'cloud' variable
+ and instead pass around an 'interface' object that can be given to modules
+   and handlers as their cloud access layer while the backing of that
+ object can be varied (good for abstraction and testing)
+ - use a single set of functions to do importing of modules
+ - add a function in which will search for a given set of module names with
+ a given set of attributes and return those which are found
+ - refactor logging so that instead of using a single top level 'log' that
+ instead each component/module can use its own logger (if desired), this
+ should be backwards compatible with handlers and config modules that used
+ the passed in logger (its still passed in)
+ - ensure that all places where exception are caught and where applicable
+ that the util logexc() is called, so that no exceptions that may occur
+ are dropped without first being logged (where it makes sense for this
+ to happen)
+ - add a 'requires' file that lists cloud-init dependencies
+ - applying it in package creation (bdeb and brpm) as well as using it
+ in the modified setup.py to ensure dependencies are installed when
+ using that method of packaging
+ - add a 'version.py' that lists the active version (in code) so that code
+ inside cloud-init can report the version in messaging and other config files
+ - cleanup of subprocess usage so that all subprocess calls go through the
+ subp() utility method, which now has an exception type that will provide
+ detailed information on python 2.6 and 2.7
+ - forced all code loading, moving, chmod, writing files and other system
+ level actions to go through standard set of util functions, this greatly
+ helps in debugging and determining exactly which system actions cloud-init is
+ performing
+ - switching out the templating engine cheetah for tempita since tempita has
+ no external dependencies (minus python) while cheetah has many dependencies
+ which makes it more difficult to adopt cloud-init in distros that may not
+ have those dependencies
+ - adjust url fetching and url trying to go through a single function that
+ reads urls in the new 'url helper' file, this helps in tracing, debugging
+ and knowing which urls are being called and/or posted to from with-in
+ cloud-init code
+ - add in the sending of a 'User-Agent' header for all urls fetched that
+   do not provide their own header mapping, derive this user-agent from
+ the following template, 'Cloud-Init/{version}' where the version is the
+ cloud-init version number
+ - using prettytable for netinfo 'debug' printing since it provides a standard
+ and defined output that should be easier to parse than a custom format
+ - add a set of distro specific classes, that handle distro specific actions
+ that modules and or handler code can use as needed, this is organized into
+ a base abstract class with child classes that implement the shared
+ functionality. config determines exactly which subclass to load, so it can
+ be easily extended as needed.
+ - current functionality
+ - network interface config file writing
+ - hostname setting/updating
+ - locale/timezone/ setting
+ - updating of /etc/hosts (with templates or generically)
+ - package commands (ie installing, removing)/mirror finding
+ - interface up/down activating
+ - implemented a debian + ubuntu subclass
+ - implemented a redhat + fedora subclass
+ - adjust the root 'cloud.cfg' file to now have distribution/path specific
+ configuration values in it. these special configs are merged as the normal
+ config is, but the system level config is not passed into modules/handlers
+ - modules/handlers must go through the path and distro object instead
+ - have the cloudstack datasource test the url before calling into boto to
+ avoid the long wait for boto to finish retrying and finally fail when
+ the gateway meta-data address is unavailable
+ - add a simple mock ec2 meta-data python based http server that can serve a
+ very simple set of ec2 meta-data back to callers
+ - useful for testing or for understanding what the ec2 meta-data
+ service can provide in terms of data or functionality
+ - for ssh key and authorized key file parsing add in classes and util functions
+ that maintain the state of individual lines, allowing for a clearer
+ separation of parsing and modification (useful for testing and tracing)
+ - add a set of 'base' init.d scripts that can be used on systems that do
+ not have full upstart or systemd support (or support that does not match
+ the standard fedora/ubuntu implementation)
+ - currently these are being tested on RHEL 6.2
+ - separate the datasources into their own subdirectory (instead of being
+   a top-level item), this matches how config 'modules' and user-data 'handlers'
+   are also in their own subdirectory (thus helping new developers and others
+ understand the code layout in a quicker manner)
+ - add the building of rpms based off a new cli tool and template 'spec' file
+ that will templatize and perform the necessary commands to create a source
+ and binary package to be used with a cloud-init install on a 'rpm' supporting
+ system
+ - uses the new standard set of requires and converts those pypi requirements
+     into a local set of package requirements (that are known to exist on RHEL
+ systems but should also exist on fedora systems)
+ - adjust the bdeb builder to be a python script (instead of a shell script) and
+ make its 'control' file a template that takes in the standard set of pypi
+ dependencies and uses a local mapping (known to work on ubuntu) to create the
+ packages set of dependencies (that should also work on ubuntu-like systems)
+ - pythonify a large set of various pieces of code
+ - remove wrapping return statements with () when it has no effect
+ - upper case all constants used
+ - correctly 'case' class and method names (where applicable)
+ - use os.path.join (and similar commands) instead of custom path creation
+   - use 'is None' instead of the frowned upon '== None' which picks up a larger
+     set of 'true' cases than is typically desired (ie for objects that have
+     their own equality)
+ - use context managers on locks, tempdir, chdir, file, selinux, umask,
+ unmounting commands so that these actions do not have to be closed and/or
+ cleaned up manually in finally blocks, which is typically not done and will
+ eventually be a bug in the future
+ - use the 'abc' module for abstract classes base where possible
+ - applied in the datasource root class, the distro root class, and the
+ user-data v2 root class
+ - when loading yaml, check that the 'root' type matches a predefined set of
+ valid types (typically just 'dict') and throw a type error if a mismatch
+ occurs, this seems to be a good idea to do when loading user config files
+ - when forking a long running task (ie resizing a filesystem) use a new util
+ function that will fork and then call a callback, instead of having to
+ implement all that code in a non-shared location (thus allowing it to be
+ used by others in the future)
+ - when writing out filenames, go through a util function that will attempt to
+ ensure that the given filename is 'filesystem' safe by replacing '/' with
+ '_' and removing characters which do not match a given whitelist of allowed
+ filename characters
+ - for the varying usages of the 'blkid' command make a function in the util
+ module that can be used as the single point of entry for interaction with
+ that command (and its results) instead of having X separate implementations
+ - place the rfc 2822 time formatting and uptime repeated pieces of code in the
+ util module as a set of function with the name 'time_rfc2822'/'uptime'
+ - separate the pylint+pep8 calling from one tool into two individual tools so
+ that they can be called independently, add make file sections that can be
+ used to call these independently
+ - remove the support for the old style config that was previously located in
+ '/etc/ec2-init/ec2-config.cfg', no longer supported!
+ - instead of using a altered config parser that added its own 'dummy' section
+ on in the 'mcollective' module, use configobj which handles the parsing of
+ config without sections better (and it also maintains comments instead of
+ removing them)
+ - use the new defaulting config parser (that will not raise errors on sections
+ that do not exist or return errors when values are fetched that do not exist)
+ in the 'puppet' module
+ - for config 'modules' add in the ability for the module to provide a list of
+ distro names which it is known to work with, if when ran and the distro being
+ used name does not match one of those in this list, a warning will be written
+   out saying that this module may not work correctly on this distribution
+ - for all dynamically imported modules ensure that they are fixed up before
+ they are used by ensuring that they have certain attributes, if they do not
+ have those attributes they will be set to a sensible set of defaults instead
+ - adjust all 'config' modules and handlers to use the adjusted util functions
+ and the new distro objects where applicable so that those pieces of code can
+ benefit from the unified and enhanced functionality being provided in that
+ util module
+ - fix a potential bug whereby when a #includeonce was encountered it would
+ enable checking of urls against a cache, if later a #include was encountered
+ it would continue checking against that cache, instead of refetching (which
+ would likely be the expected case)
+ - add a openstack/nova based pep8 extension utility ('hacking.py') that allows
+ for custom checks (along with the standard pep8 checks) to occur when running
+ 'make pep8' and its derivatives
0.6.4:
- support relative path in AuthorizedKeysFile (LP: #970071).
- make apt-get update run with --quiet (suitable for logging) (LP: #1012613)
diff --git a/Makefile b/Makefile
index 0fc6c46b..a96d6b5b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,14 +1,33 @@
+CWD=$(shell pwd)
+PY_FILES=$(shell find cloudinit bin -name "*.py")
+PY_FILES+="bin/cloud-init"
all: test
+pep8:
+ $(CWD)/tools/run-pep8 $(PY_FILES)
+
pylint:
- pylint cloudinit
+ $(CWD)/tools/run-pylint $(PY_FILES)
pyflakes:
- pyflakes .
+ pyflakes $(PY_FILES)
test:
- nosetests tests/unittests/
+ nosetests $(noseopts) tests/unittests/
+
+2to3:
+ 2to3 $(PY_FILES)
+
+clean:
+ rm -rf /var/log/cloud-init.log \
+ /var/lib/cloud/
+
+rpm:
+ cd packages && ./brpm
+
+deb:
+ cd packages && ./bddeb
-.PHONY: test pylint pyflakes
+.PHONY: test pylint pyflakes 2to3 clean pep8 rpm deb
diff --git a/Requires b/Requires
new file mode 100644
index 00000000..10be0155
--- /dev/null
+++ b/Requires
@@ -0,0 +1,30 @@
+# Pypi requirements for cloud-init to work
+
+# Used for templating any files or strings that are considered
+# to be templates, not cheetah since it pulls in a lot of extra libs.
+# This one is pretty dinky and does what we want (variable substitution)
+Tempita
+
+# This is used for any pretty printing of tabular data.
+PrettyTable
+
+# This one is currently only used by the MAAS datasource. If that
+# datasource is removed, this is no longer needed
+oauth
+
+# This is used to fetch the ec2 metadata into a easily
+# parseable format, instead of having to have cloud-init perform
+# those same fetches and decodes and signing (...) that ec2 requires.
+boto
+
+# This is only needed for places where we need to support configs in a manner
+# that the built-in config parser is not sufficient (ie
+# when we need to preserve comments, or do not have a top-level
+# section)...
+configobj
+
+# All new style configurations are in the yaml format
+pyyaml
+
+# The new main entrypoint uses argparse instead of optparse
+argparse
diff --git a/TODO b/TODO
index 568bdb07..1725db00 100644
--- a/TODO
+++ b/TODO
@@ -1,14 +1,37 @@
-- consider 'failsafe' DataSource
+- Consider a 'failsafe' DataSource
If all others fail, setting a default that
- sets the user password, writing it to console
- logs to console that this happened
-- consider 'previous' DataSource
+- Consider a 'previous' DataSource
If no other data source is found, fall back to the 'previous' one
  keep an indication of what instance id that is in /var/lib/cloud
-- rewrite "cloud-init-query"
- have DataSource and cloudinit expose explicit fields
+- Rewrite "cloud-init-query" (currently not implemented)
+ Possibly have DataSource and cloudinit expose explicit fields
- instance-id
- hostname
- mirror
- release
- ssh public keys
+- Remove the conversion of the ubuntu network interface format conversion
+ to a RH/fedora format and replace it with a top level format that uses
+ the netcf libraries format instead (which itself knows how to translate
+ into the specific formats)
+- Replace the 'apt*' modules with variants that now use the distro classes
+ to perform distro independent packaging commands (where possible)
+- Canonicalize the semaphore/lock name for modules and user data handlers
+ a. It is most likely a bug that currently exists that if a module in config
+ alters its name and it has already ran, then it will get ran again since
+     the lock name hasn't been canonicalized
+- Replace some the LOG.debug calls with a LOG.info where appropriate instead
+ of how right now there is really only 2 levels (WARN and DEBUG)
+- Remove the 'cc_' for config modules, either have them fully specified (ie
+ 'cloudinit.config.resizefs') or by default only look in the 'cloudinit.config'
+ for these modules (or have a combination of the above), this avoids having
+ to understand where your modules are coming from (which can be altered by
+ the current python inclusion path)
+- Depending on if people think the wrapper around 'os.path.join' provided
+ by the 'paths' object is useful (allowing us to modify based off a 'read'
+   and 'write' configuration based 'root') or is just too confusing, it might be
+   something to remove later, and just recommend using 'chroot' instead (or the X
+   different other options which are similar to 'chroot'), which might be more
+ natural and less confusing...
diff --git a/bin/cloud-init b/bin/cloud-init
new file mode 100755
index 00000000..c7863db1
--- /dev/null
+++ b/bin/cloud-init
@@ -0,0 +1,474 @@
+#!/usr/bin/python
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import argparse
+import os
+import sys
+import traceback
+
+# This is more just for running from the bin folder so that
+# cloud-init binary can find the cloudinit module
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
+ sys.argv[0]), os.pardir, os.pardir))
+if os.path.exists(os.path.join(possible_topdir, "cloudinit", "__init__.py")):
+ sys.path.insert(0, possible_topdir)
+
+from cloudinit import log as logging
+from cloudinit import netinfo
+from cloudinit import sources
+from cloudinit import stages
+from cloudinit import templater
+from cloudinit import util
+from cloudinit import version
+
+from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
+ CLOUD_CONFIG)
+
+
+# Pretty little welcome message template
+WELCOME_MSG_TPL = ("Cloud-init v. {{version}} running '{{action}}' at "
+ "{{timestamp}}. Up {{uptime}} seconds.")
+
+# Module section template
+MOD_SECTION_TPL = "cloud_%s_modules"
+
+# Things u can query on
+QUERY_DATA_TYPES = [
+ 'data',
+ 'data_raw',
+ 'instance_id',
+]
+
+# Frequency shortname to full name
+# (so users don't have to remember the full name...)
+FREQ_SHORT_NAMES = {
+ 'instance': PER_INSTANCE,
+ 'always': PER_ALWAYS,
+ 'once': PER_ONCE,
+}
+
+LOG = logging.getLogger()
+
+
+# Used for when a logger may not be active
+# and we still want to print exceptions...
+def print_exc(msg=''):
+ if msg:
+ sys.stderr.write("%s\n" % (msg))
+ sys.stderr.write('-' * 60)
+ sys.stderr.write("\n")
+ traceback.print_exc(file=sys.stderr)
+ sys.stderr.write('-' * 60)
+ sys.stderr.write("\n")
+
+
+def welcome(action):
+ tpl_params = {
+ 'version': version.version_string(),
+ 'uptime': util.uptime(),
+ 'timestamp': util.time_rfc2822(),
+ 'action': action,
+ }
+ tpl_msg = templater.render_string(WELCOME_MSG_TPL, tpl_params)
+ util.multi_log("%s\n" % (tpl_msg),
+ console=False, stderr=True)
+
+
+def extract_fns(args):
+ # Files are already opened so lets just pass that along
+ # since it would of broke if it couldn't have
+ # read that file already...
+ fn_cfgs = []
+ if args.files:
+ for fh in args.files:
+ # The realpath is more useful in logging
+ # so lets resolve to that...
+ fn_cfgs.append(os.path.realpath(fh.name))
+ return fn_cfgs
+
+
+def run_module_section(mods, action_name, section):
+ full_section_name = MOD_SECTION_TPL % (section)
+ (which_ran, failures) = mods.run_section(full_section_name)
+ total_attempted = len(which_ran) + len(failures)
+ if total_attempted == 0:
+ msg = ("No '%s' modules to run"
+ " under section '%s'") % (action_name, full_section_name)
+ sys.stderr.write("%s\n" % (msg))
+ LOG.debug(msg)
+ return 0
+ else:
+ LOG.debug("Ran %s modules with %s failures",
+ len(which_ran), len(failures))
+ return len(failures)
+
+
+def main_init(name, args):
+ deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
+ if args.local:
+ deps = [sources.DEP_FILESYSTEM]
+
+ if not args.local:
+ # See doc/kernel-cmdline.txt
+ #
+ # This is used in maas datasource, in "ephemeral" (read-only root)
+ # environment where the instance netboots to iscsi ro root.
+ # and the entity that controls the pxe config has to configure
+ # the maas datasource.
+ #
+ # Could be used elsewhere, only works on network based (not local).
+ root_name = "%s.d" % (CLOUD_CONFIG)
+ target_fn = os.path.join(root_name, "91_kernel_cmdline_url.cfg")
+ util.read_write_cmdline_url(target_fn)
+
+ # Cloud-init 'init' stage is broken up into the following sub-stages
+ # 1. Ensure that the init object fetches its config without errors
+ # 2. Setup logging/output redirections with resultant config (if any)
+ # 3. Initialize the cloud-init filesystem
+ # 4. Check if we can stop early by looking for various files
+ # 5. Fetch the datasource
+ # 6. Connect to the current instance location + update the cache
+ # 7. Consume the userdata (handlers get activated here)
+ # 8. Construct the modules object
+ # 9. Adjust any subsequent logging/output redirections using
+ # the modules objects configuration
+ # 10. Run the modules for the 'init' stage
+ # 11. Done!
+ welcome(name)
+ init = stages.Init(deps)
+ # Stage 1
+ init.read_cfg(extract_fns(args))
+ # Stage 2
+ outfmt = None
+ errfmt = None
+ try:
+ LOG.debug("Closing stdin")
+ util.close_stdin()
+ (outfmt, errfmt) = util.fixup_output(init.cfg, name)
+ except:
+ util.logexc(LOG, "Failed to setup output redirection!")
+ print_exc("Failed to setup output redirection!")
+ if args.debug:
+ # Reset so that all the debug handlers are closed out
+ LOG.debug(("Logging being reset, this logger may no"
+ " longer be active shortly"))
+ logging.resetLogging()
+ logging.setupLogging(init.cfg)
+ # Stage 3
+ try:
+ init.initialize()
+ except Exception:
+ util.logexc(LOG, "Failed to initialize, likely bad things to come!")
+ # Stage 4
+ path_helper = init.paths
+ if not args.local:
+ sys.stderr.write("%s\n" % (netinfo.debug_info()))
+ LOG.debug(("Checking to see if files that we need already"
+ " exist from a previous run that would allow us"
+ " to stop early."))
+ stop_files = [
+ os.path.join(path_helper.get_cpath("data"), "no-net"),
+ path_helper.get_ipath_cur("obj_pkl"),
+ ]
+ existing_files = []
+ for fn in stop_files:
+ try:
+ c = util.load_file(fn)
+ if len(c):
+ existing_files.append((fn, len(c)))
+ except Exception:
+ pass
+ if existing_files:
+ LOG.debug("Exiting early due to the existence of %s files",
+ existing_files)
+ return 0
+ else:
+ # The cache is not instance specific, so it has to be purged
+ # but we want 'start' to benefit from a cache if
+ # a previous start-local populated one...
+ manual_clean = util.get_cfg_option_bool(init.cfg,
+ 'manual_cache_clean', False)
+ if manual_clean:
+ LOG.debug("Not purging instance link, manual cleaning enabled")
+ init.purge_cache(False)
+ else:
+ init.purge_cache()
+ # Delete the non-net file as well
+ util.del_file(os.path.join(path_helper.get_cpath("data"), "no-net"))
+ # Stage 5
+ try:
+ init.fetch()
+ except sources.DataSourceNotFoundException:
+ util.logexc(LOG, ("No instance datasource found!"
+ " Likely bad things to come!"))
+ # In the case of cloud-init (net mode) it is a bit
+ # more likely that the user would consider it
+ # failure if nothing was found. When using
+ # upstart it will also mentions job failure
+ # in console log if exit code is != 0.
+ if not args.force:
+ if args.local:
+ return 0
+ else:
+ return 1
+ # Stage 6
+ iid = init.instancify()
+ LOG.debug("%s will now be targeting instance id: %s", name, iid)
+ init.update()
+ # Stage 7
+ try:
+ # Attempt to consume the data per instance.
+ # This may run user-data handlers and/or perform
+ # url downloads and such as needed.
+ (ran, _results) = init.cloudify().run('consume_userdata',
+ init.consume_userdata,
+ args=[PER_INSTANCE],
+ freq=PER_INSTANCE)
+ if not ran:
+ # Just consume anything that is set to run per-always
+ # if nothing ran in the per-instance code
+ #
+ # See: https://bugs.launchpad.net/bugs/819507 for a little
+ # reason behind this...
+ init.consume_userdata(PER_ALWAYS)
+ except Exception:
+ util.logexc(LOG, "Consuming user data failed!")
+ return 1
+ # Stage 8 - TODO - do we really need to re-extract our configs?
+ mods = stages.Modules(init, extract_fns(args))
+ # Stage 9 - TODO is this really needed??
+ try:
+ outfmt_orig = outfmt
+ errfmt_orig = errfmt
+ (outfmt, errfmt) = util.get_output_cfg(mods.cfg, name)
+ if outfmt_orig != outfmt or errfmt_orig != errfmt:
+ LOG.warn("Stdout, stderr changing to (%s, %s)", outfmt, errfmt)
+ (outfmt, errfmt) = util.fixup_output(mods.cfg, name)
+ except:
+ util.logexc(LOG, "Failed to re-adjust output redirection!")
+ # Stage 10
+ return run_module_section(mods, name, name)
+
+
+def main_modules(action_name, args):
+ name = args.mode
+ # Cloud-init 'modules' stages are broken up into the following sub-stages
+ # 1. Ensure that the init object fetches its config without errors
+ # 2. Get the datasource from the init object, if it does
+ # not exist then that means the main_init stage never
+ # worked, and thus this stage can not run.
+ # 3. Construct the modules object
+ # 4. Adjust any subsequent logging/output redirections using
+ # the modules objects configuration
+ # 5. Run the modules for the given stage name
+ # 6. Done!
+ welcome("%s:%s" % (action_name, name))
+ init = stages.Init(ds_deps=[])
+ # Stage 1
+ init.read_cfg(extract_fns(args))
+ # Stage 2
+ try:
+ init.fetch()
+ except sources.DataSourceNotFoundException:
+        # There was no datasource found, there's nothing to do
+ util.logexc(LOG, ('Can not apply stage %s, '
+ 'no datasource found!'
+ " Likely bad things to come!"), name)
+ print_exc(('Can not apply stage %s, '
+ 'no datasource found!'
+ " Likely bad things to come!") % (name))
+ if not args.force:
+ return 1
+ # Stage 3
+ mods = stages.Modules(init, extract_fns(args))
+ # Stage 4
+ try:
+ LOG.debug("Closing stdin")
+ util.close_stdin()
+ util.fixup_output(mods.cfg, name)
+ except:
+ util.logexc(LOG, "Failed to setup output redirection!")
+ if args.debug:
+ # Reset so that all the debug handlers are closed out
+ LOG.debug(("Logging being reset, this logger may no"
+ " longer be active shortly"))
+ logging.resetLogging()
+ logging.setupLogging(mods.cfg)
+ # Stage 5
+ return run_module_section(mods, name, name)
+
+
+def main_query(name, _args):
+ raise NotImplementedError(("Action '%s' is not"
+ " currently implemented") % (name))
+
+
+def main_single(name, args):
+ # Cloud-init single stage is broken up into the following sub-stages
+ # 1. Ensure that the init object fetches its config without errors
+ # 2. Attempt to fetch the datasource (warn if it doesn't work)
+ # 3. Construct the modules object
+ # 4. Adjust any subsequent logging/output redirections using
+ # the modules objects configuration
+ # 5. Run the single module
+ # 6. Done!
+ mod_name = args.name
+ welcome("%s:%s" % (name, mod_name))
+ init = stages.Init(ds_deps=[])
+ # Stage 1
+ init.read_cfg(extract_fns(args))
+ # Stage 2
+ try:
+ init.fetch()
+ except sources.DataSourceNotFoundException:
+ # There was no datasource found,
+ # that might be bad (or ok) depending on
+ # the module being ran (so continue on)
+ util.logexc(LOG, ("Failed to fetch your datasource,"
+ " likely bad things to come!"))
+ print_exc(("Failed to fetch your datasource,"
+ " likely bad things to come!"))
+ if not args.force:
+ return 1
+ # Stage 3
+ mods = stages.Modules(init, extract_fns(args))
+ mod_args = args.module_args
+ if mod_args:
+ LOG.debug("Using passed in arguments %s", mod_args)
+ mod_freq = args.frequency
+ if mod_freq:
+ LOG.debug("Using passed in frequency %s", mod_freq)
+ mod_freq = FREQ_SHORT_NAMES.get(mod_freq)
+ # Stage 4
+ try:
+ LOG.debug("Closing stdin")
+ util.close_stdin()
+ util.fixup_output(mods.cfg, None)
+ except:
+ util.logexc(LOG, "Failed to setup output redirection!")
+ if args.debug:
+ # Reset so that all the debug handlers are closed out
+ LOG.debug(("Logging being reset, this logger may no"
+ " longer be active shortly"))
+ logging.resetLogging()
+ logging.setupLogging(mods.cfg)
+ # Stage 5
+ (which_ran, failures) = mods.run_single(mod_name,
+ mod_args,
+ mod_freq)
+ if failures:
+ LOG.warn("Ran %s but it failed!", mod_name)
+ return 1
+ elif not which_ran:
+ LOG.warn("Did not run %s, does it exist?", mod_name)
+ return 1
+ else:
+ # Guess it worked
+ return 0
+
+
+def main():
+ parser = argparse.ArgumentParser()
+
+ # Top level args
+ parser.add_argument('--version', '-v', action='version',
+ version='%(prog)s ' + (version.version_string()))
+ parser.add_argument('--file', '-f', action='append',
+ dest='files',
+ help=('additional yaml configuration'
+ ' files to use'),
+ type=argparse.FileType('rb'))
+ parser.add_argument('--debug', '-d', action='store_true',
+ help=('show additional pre-action'
+ ' logging (default: %(default)s)'),
+ default=False)
+ parser.add_argument('--force', action='store_true',
+ help=('force running even if no datasource is'
+ ' found (use at your own risk)'),
+ dest='force',
+ default=False)
+ subparsers = parser.add_subparsers()
+
+ # Each action and its sub-options (if any)
+ parser_init = subparsers.add_parser('init',
+ help=('initializes cloud-init and'
+ ' performs initial modules'))
+ parser_init.add_argument("--local", '-l', action='store_true',
+ help="start in local mode (default: %(default)s)",
+ default=False)
+ # This is used so that we can know which action is selected +
+ # the functor to use to run this subcommand
+ parser_init.set_defaults(action=('init', main_init))
+
+ # These settings are used for the 'config' and 'final' stages
+ parser_mod = subparsers.add_parser('modules',
+ help=('activates modules '
+ 'using a given configuration key'))
+ parser_mod.add_argument("--mode", '-m', action='store',
+ help=("module configuration name "
+ "to use (default: %(default)s)"),
+ default='config',
+ choices=('init', 'config', 'final'))
+ parser_mod.set_defaults(action=('modules', main_modules))
+
+ # These settings are used when you want to query information
+ # stored in the cloud-init data objects/directories/files
+ parser_query = subparsers.add_parser('query',
+ help=('query information stored '
+ 'in cloud-init'))
+ parser_query.add_argument("--name", '-n', action="store",
+ help="item name to query on",
+ required=True,
+ choices=QUERY_DATA_TYPES)
+ parser_query.set_defaults(action=('query', main_query))
+
+ # This subcommand allows you to run a single module
+ parser_single = subparsers.add_parser('single',
+ help=('run a single module '))
+ parser_single.set_defaults(action=('single', main_single))
+ parser_single.add_argument("--name", '-n', action="store",
+ help="module name to run",
+ required=True)
+ parser_single.add_argument("--frequency", action="store",
+ help=("frequency of the module"),
+ required=False,
+ choices=list(FREQ_SHORT_NAMES.keys()))
+ parser_single.add_argument("module_args", nargs="*",
+ metavar='argument',
+ help=('any additional arguments to'
+ ' pass to this module'))
+ parser_single.set_defaults(action=('single', main_single))
+
+ args = parser.parse_args()
+
+ # Setup basic logging to start (until reinitialized)
+ # iff in debug mode...
+ if args.debug:
+ logging.setupBasicLogging()
+
+ (name, functor) = args.action
+ return functor(name, args)
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/cloud-init-cfg.py b/cloud-init-cfg.py
deleted file mode 100755
index 3a475c1c..00000000
--- a/cloud-init-cfg.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/python
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-import cloudinit
-import cloudinit.util as util
-import cloudinit.CloudConfig as CC
-import logging
-import os
-
-
-def Usage(out=sys.stdout):
- out.write("Usage: %s name\n" % sys.argv[0])
-
-
-def main():
- # expect to be called with
- # name [ freq [ args ]
- # run the cloud-config job 'name' at with given args
- # or
- # read cloud config jobs from config (builtin -> system)
- # and run all in order
-
- util.close_stdin()
-
- modename = "config"
-
- if len(sys.argv) < 2:
- Usage(sys.stderr)
- sys.exit(1)
- if sys.argv[1] == "all":
- name = "all"
- if len(sys.argv) > 2:
- modename = sys.argv[2]
- else:
- freq = None
- run_args = []
- name = sys.argv[1]
- if len(sys.argv) > 2:
- freq = sys.argv[2]
- if freq == "None":
- freq = None
- if len(sys.argv) > 3:
- run_args = sys.argv[3:]
-
- cfg_path = cloudinit.get_ipath_cur("cloud_config")
- cfg_env_name = cloudinit.cfg_env_name
- if cfg_env_name in os.environ:
- cfg_path = os.environ[cfg_env_name]
-
- cloud = cloudinit.CloudInit(ds_deps=[]) # ds_deps=[], get only cached
- try:
- cloud.get_data_source()
- except cloudinit.DataSourceNotFoundException as e:
- # there was no datasource found, theres nothing to do
- sys.exit(0)
-
- cc = CC.CloudConfig(cfg_path, cloud)
-
- try:
- (outfmt, errfmt) = CC.get_output_cfg(cc.cfg, modename)
- CC.redirect_output(outfmt, errfmt)
- except Exception as e:
- err("Failed to get and set output config: %s\n" % e)
-
- cloudinit.logging_set_from_cfg(cc.cfg)
- log = logging.getLogger()
- log.info("cloud-init-cfg %s" % sys.argv[1:])
-
- module_list = []
- if name == "all":
- modlist_cfg_name = "cloud_%s_modules" % modename
- module_list = CC.read_cc_modules(cc.cfg, modlist_cfg_name)
- if not len(module_list):
- err("no modules to run in cloud_config [%s]" % modename, log)
- sys.exit(0)
- else:
- module_list.append([name, freq] + run_args)
-
- failures = CC.run_cc_modules(cc, module_list, log)
- if len(failures):
- err("errors running cloud_config [%s]: %s" % (modename, failures), log)
- sys.exit(len(failures))
-
-
-def err(msg, log=None):
- if log:
- log.error(msg)
- sys.stderr.write(msg + "\n")
-
-
-def fail(msg, log=None):
- err(msg, log)
- sys.exit(1)
-
-if __name__ == '__main__':
- main()
diff --git a/cloud-init-query.py b/cloud-init-query.py
deleted file mode 100755
index 856cf462..00000000
--- a/cloud-init-query.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/python
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import sys
-import cloudinit
-import cloudinit.CloudConfig
-
-
-def Usage(out=sys.stdout):
- out.write("Usage: %s name\n" % sys.argv[0])
-
-
-def main():
- # expect to be called with name of item to fetch
- if len(sys.argv) != 2:
- Usage(sys.stderr)
- sys.exit(1)
-
- cfg_path = cloudinit.get_ipath_cur("cloud_config")
- cc = cloudinit.CloudConfig.CloudConfig(cfg_path)
- data = {
- 'user_data': cc.cloud.get_userdata(),
- 'user_data_raw': cc.cloud.get_userdata_raw(),
- 'instance_id': cc.cloud.get_instance_id(),
- }
-
- name = sys.argv[1].replace('-', '_')
-
- if name not in data:
- sys.stderr.write("unknown name '%s'. Known values are:\n %s\n" %
- (sys.argv[1], ' '.join(data.keys())))
- sys.exit(1)
-
- print data[name]
- sys.exit(0)
-
-if __name__ == '__main__':
- main()
diff --git a/cloud-init.py b/cloud-init.py
deleted file mode 100755
index 2acea3f8..00000000
--- a/cloud-init.py
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/usr/bin/python
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import subprocess
-import sys
-
-import cloudinit
-import cloudinit.util as util
-import cloudinit.CloudConfig as CC
-import cloudinit.DataSource as ds
-import cloudinit.netinfo as netinfo
-import time
-import traceback
-import logging
-import errno
-import os
-
-
-def warn(wstr):
- sys.stderr.write("WARN:%s" % wstr)
-
-
-def main():
- util.close_stdin()
-
- cmds = ("start", "start-local")
- deps = {"start": (ds.DEP_FILESYSTEM, ds.DEP_NETWORK),
- "start-local": (ds.DEP_FILESYSTEM, )}
-
- cmd = ""
- if len(sys.argv) > 1:
- cmd = sys.argv[1]
-
- cfg_path = None
- if len(sys.argv) > 2:
- # this is really for debugging only
- # but you can invoke on development system with ./config/cloud.cfg
- cfg_path = sys.argv[2]
-
- if not cmd in cmds:
- sys.stderr.write("bad command %s. use one of %s\n" % (cmd, cmds))
- sys.exit(1)
-
- now = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
- try:
- uptimef = open("/proc/uptime")
- uptime = uptimef.read().split(" ")[0]
- uptimef.close()
- except IOError as e:
- warn("unable to open /proc/uptime\n")
- uptime = "na"
-
- cmdline_msg = None
- cmdline_exc = None
- if cmd == "start":
- target = "%s.d/%s" % (cloudinit.system_config,
- "91_kernel_cmdline_url.cfg")
- if os.path.exists(target):
- cmdline_msg = "cmdline: %s existed" % target
- else:
- cmdline = util.get_cmdline()
- try:
- (key, url, content) = cloudinit.get_cmdline_url(
- cmdline=cmdline)
- if key and content:
- util.write_file(target, content, mode=0600)
- cmdline_msg = ("cmdline: wrote %s from %s, %s" %
- (target, key, url))
- elif key:
- cmdline_msg = ("cmdline: %s, %s had no cloud-config" %
- (key, url))
- except Exception:
- cmdline_exc = ("cmdline: '%s' raised exception\n%s" %
- (cmdline, traceback.format_exc()))
- warn(cmdline_exc)
-
- try:
- cfg = cloudinit.get_base_cfg(cfg_path)
- except Exception as e:
- warn("Failed to get base config. falling back to builtin: %s\n" % e)
- try:
- cfg = cloudinit.get_builtin_cfg()
- except Exception as e:
- warn("Unable to load builtin config\n")
- raise
-
- try:
- (outfmt, errfmt) = CC.get_output_cfg(cfg, "init")
- CC.redirect_output(outfmt, errfmt)
- except Exception as e:
- warn("Failed to get and set output config: %s\n" % e)
-
- cloudinit.logging_set_from_cfg(cfg)
- log = logging.getLogger()
-
- if cmdline_exc:
- log.debug(cmdline_exc)
- elif cmdline_msg:
- log.debug(cmdline_msg)
-
- try:
- cloudinit.initfs()
- except Exception as e:
- warn("failed to initfs, likely bad things to come: %s\n" % str(e))
-
- nonet_path = "%s/%s" % (cloudinit.get_cpath("data"), "no-net")
-
- if cmd == "start":
- print netinfo.debug_info()
-
- stop_files = (cloudinit.get_ipath_cur("obj_pkl"), nonet_path)
- # if starting as the network start, there are cases
- # where everything is already done for us, and it makes
- # most sense to exit early and silently
- for f in stop_files:
- try:
- fp = open(f, "r")
- fp.close()
- except:
- continue
-
- log.debug("no need for cloud-init start to run (%s)\n", f)
- sys.exit(0)
- elif cmd == "start-local":
- # cache is not instance specific, so it has to be purged
- # but we want 'start' to benefit from a cache if
- # a previous start-local populated one
- manclean = util.get_cfg_option_bool(cfg, 'manual_cache_clean', False)
- if manclean:
- log.debug("not purging cache, manual_cache_clean = True")
- cloudinit.purge_cache(not manclean)
-
- try:
- os.unlink(nonet_path)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
-
- msg = "cloud-init %s running: %s. up %s seconds" % (cmd, now, uptime)
- sys.stderr.write(msg + "\n")
- sys.stderr.flush()
-
- log.info(msg)
-
- cloud = cloudinit.CloudInit(ds_deps=deps[cmd])
-
- try:
- cloud.get_data_source()
- except cloudinit.DataSourceNotFoundException as e:
- sys.stderr.write("no instance data found in %s\n" % cmd)
- sys.exit(0)
-
- # set this as the current instance
- cloud.set_cur_instance()
-
- # store the metadata
- cloud.update_cache()
-
- msg = "found data source: %s" % cloud.datasource
- sys.stderr.write(msg + "\n")
- log.debug(msg)
-
- # parse the user data (ec2-run-userdata.py)
- try:
- ran = cloud.sem_and_run("consume_userdata", cloudinit.per_instance,
- cloud.consume_userdata, [cloudinit.per_instance], False)
- if not ran:
- cloud.consume_userdata(cloudinit.per_always)
- except:
- warn("consuming user data failed!\n")
- raise
-
- cfg_path = cloudinit.get_ipath_cur("cloud_config")
- cc = CC.CloudConfig(cfg_path, cloud)
-
- # if the output config changed, update output and err
- try:
- outfmt_orig = outfmt
- errfmt_orig = errfmt
- (outfmt, errfmt) = CC.get_output_cfg(cc.cfg, "init")
- if outfmt_orig != outfmt or errfmt_orig != errfmt:
- warn("stdout, stderr changing to (%s,%s)" % (outfmt, errfmt))
- CC.redirect_output(outfmt, errfmt)
- except Exception as e:
- warn("Failed to get and set output config: %s\n" % e)
-
- # send the cloud-config ready event
- cc_path = cloudinit.get_ipath_cur('cloud_config')
- cc_ready = cc.cfg.get("cc_ready_cmd",
- ['initctl', 'emit', 'cloud-config',
- '%s=%s' % (cloudinit.cfg_env_name, cc_path)])
- if cc_ready:
- if isinstance(cc_ready, str):
- cc_ready = ['sh', '-c', cc_ready]
- subprocess.Popen(cc_ready).communicate()
-
- module_list = CC.read_cc_modules(cc.cfg, "cloud_init_modules")
-
- failures = []
- if len(module_list):
- failures = CC.run_cc_modules(cc, module_list, log)
- else:
- msg = "no cloud_init_modules to run"
- sys.stderr.write(msg + "\n")
- log.debug(msg)
- sys.exit(0)
-
- sys.exit(len(failures))
-
-if __name__ == '__main__':
- main()
diff --git a/cloudinit/CloudConfig/__init__.py b/cloudinit/CloudConfig/__init__.py
deleted file mode 100644
index d2d1035a..00000000
--- a/cloudinit/CloudConfig/__init__.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2008-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Chuck Short <chuck.short@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-import yaml
-import cloudinit
-import cloudinit.util as util
-import sys
-import traceback
-import os
-import subprocess
-import time
-
-per_instance = cloudinit.per_instance
-per_always = cloudinit.per_always
-per_once = cloudinit.per_once
-
-
-class CloudConfig():
- cfgfile = None
- cfg = None
-
- def __init__(self, cfgfile, cloud=None, ds_deps=None):
- if cloud == None:
- self.cloud = cloudinit.CloudInit(ds_deps)
- self.cloud.get_data_source()
- else:
- self.cloud = cloud
- self.cfg = self.get_config_obj(cfgfile)
-
- def get_config_obj(self, cfgfile):
- try:
- cfg = util.read_conf(cfgfile)
- except:
- # TODO: this 'log' could/should be passed in
- cloudinit.log.critical("Failed loading of cloud config '%s'. "
- "Continuing with empty config\n" % cfgfile)
- cloudinit.log.debug(traceback.format_exc() + "\n")
- cfg = None
- if cfg is None:
- cfg = {}
-
- try:
- ds_cfg = self.cloud.datasource.get_config_obj()
- except:
- ds_cfg = {}
-
- cfg = util.mergedict(cfg, ds_cfg)
- return(util.mergedict(cfg, self.cloud.cfg))
-
- def handle(self, name, args, freq=None):
- try:
- mod = __import__("cc_" + name.replace("-", "_"), globals())
- def_freq = getattr(mod, "frequency", per_instance)
- handler = getattr(mod, "handle")
-
- if not freq:
- freq = def_freq
-
- self.cloud.sem_and_run("config-" + name, freq, handler,
- [name, self.cfg, self.cloud, cloudinit.log, args])
- except:
- raise
-
-
-# reads a cloudconfig module list, returns
-# a 2 dimensional array suitable to pass to run_cc_modules
-def read_cc_modules(cfg, name):
- if name not in cfg:
- return([])
- module_list = []
- # create 'module_list', an array of arrays
- # where array[0] = config
- # array[1] = freq
- # array[2:] = arguemnts
- for item in cfg[name]:
- if isinstance(item, str):
- module_list.append((item,))
- elif isinstance(item, list):
- module_list.append(item)
- else:
- raise TypeError("failed to read '%s' item in config")
- return(module_list)
-
-
-def run_cc_modules(cc, module_list, log):
- failures = []
- for cfg_mod in module_list:
- name = cfg_mod[0]
- freq = None
- run_args = []
- if len(cfg_mod) > 1:
- freq = cfg_mod[1]
- if len(cfg_mod) > 2:
- run_args = cfg_mod[2:]
-
- try:
- log.debug("handling %s with freq=%s and args=%s" %
- (name, freq, run_args))
- cc.handle(name, run_args, freq=freq)
- except:
- log.warn(traceback.format_exc())
- log.error("config handling of %s, %s, %s failed\n" %
- (name, freq, run_args))
- failures.append(name)
-
- return(failures)
-
-
-# always returns well formated values
-# cfg is expected to have an entry 'output' in it, which is a dictionary
-# that includes entries for 'init', 'config', 'final' or 'all'
-# init: /var/log/cloud.out
-# config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ]
-# final:
-# output: "| logger -p"
-# error: "> /dev/null"
-# this returns the specific 'mode' entry, cleanly formatted, with value
-# None if if none is given
-def get_output_cfg(cfg, mode="init"):
- ret = [None, None]
- if not 'output' in cfg:
- return ret
-
- outcfg = cfg['output']
- if mode in outcfg:
- modecfg = outcfg[mode]
- else:
- if 'all' not in outcfg:
- return ret
- # if there is a 'all' item in the output list
- # then it applies to all users of this (init, config, final)
- modecfg = outcfg['all']
-
- # if value is a string, it specifies stdout and stderr
- if isinstance(modecfg, str):
- ret = [modecfg, modecfg]
-
- # if its a list, then we expect (stdout, stderr)
- if isinstance(modecfg, list):
- if len(modecfg) > 0:
- ret[0] = modecfg[0]
- if len(modecfg) > 1:
- ret[1] = modecfg[1]
-
- # if it is a dictionary, expect 'out' and 'error'
- # items, which indicate out and error
- if isinstance(modecfg, dict):
- if 'output' in modecfg:
- ret[0] = modecfg['output']
- if 'error' in modecfg:
- ret[1] = modecfg['error']
-
- # if err's entry == "&1", then make it same as stdout
- # as in shell syntax of "echo foo >/dev/null 2>&1"
- if ret[1] == "&1":
- ret[1] = ret[0]
-
- swlist = [">>", ">", "|"]
- for i in range(len(ret)):
- if not ret[i]:
- continue
- val = ret[i].lstrip()
- found = False
- for s in swlist:
- if val.startswith(s):
- val = "%s %s" % (s, val[len(s):].strip())
- found = True
- break
- if not found:
- # default behavior is append
- val = "%s %s" % (">>", val.strip())
- ret[i] = val
-
- return(ret)
-
-
-# redirect_output(outfmt, errfmt, orig_out, orig_err)
-# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
-# fmt can be:
-# > FILEPATH
-# >> FILEPATH
-# | program [ arg1 [ arg2 [ ... ] ] ]
-#
-# with a '|', arguments are passed to shell, so one level of
-# shell escape is required.
-def redirect_output(outfmt, errfmt, o_out=sys.stdout, o_err=sys.stderr):
- if outfmt:
- (mode, arg) = outfmt.split(" ", 1)
- if mode == ">" or mode == ">>":
- owith = "ab"
- if mode == ">":
- owith = "wb"
- new_fp = open(arg, owith)
- elif mode == "|":
- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
- new_fp = proc.stdin
- else:
- raise TypeError("invalid type for outfmt: %s" % outfmt)
-
- if o_out:
- os.dup2(new_fp.fileno(), o_out.fileno())
- if errfmt == outfmt:
- os.dup2(new_fp.fileno(), o_err.fileno())
- return
-
- if errfmt:
- (mode, arg) = errfmt.split(" ", 1)
- if mode == ">" or mode == ">>":
- owith = "ab"
- if mode == ">":
- owith = "wb"
- new_fp = open(arg, owith)
- elif mode == "|":
- proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
- new_fp = proc.stdin
- else:
- raise TypeError("invalid type for outfmt: %s" % outfmt)
-
- if o_err:
- os.dup2(new_fp.fileno(), o_err.fileno())
- return
-
-
-def run_per_instance(name, func, args, clear_on_fail=False):
- semfile = "%s/%s" % (cloudinit.get_ipath_cur("data"), name)
- if os.path.exists(semfile):
- return
-
- util.write_file(semfile, str(time.time()))
- try:
- func(*args)
- except:
- if clear_on_fail:
- os.unlink(semfile)
- raise
-
-
-# apt_get top level command (install, update...), and args to pass it
-def apt_get(tlc, args=None):
- if args is None:
- args = []
- e = os.environ.copy()
- e['DEBIAN_FRONTEND'] = 'noninteractive'
- cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confold',
- '--assume-yes', '--quiet', tlc]
- cmd.extend(args)
- subprocess.check_call(cmd, env=e)
-
-
-def update_package_sources():
- run_per_instance("update-sources", apt_get, ("update",))
-
-
-def install_packages(pkglist):
- update_package_sources()
- apt_get("install", pkglist)
diff --git a/cloudinit/CloudConfig/cc_apt_pipelining.py b/cloudinit/CloudConfig/cc_apt_pipelining.py
deleted file mode 100644
index 0286a9ae..00000000
--- a/cloudinit/CloudConfig/cc_apt_pipelining.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-#
-# Author: Ben Howard <ben.howard@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_instance
-
-frequency = per_instance
-default_file = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
-
-
-def handle(_name, cfg, _cloud, log, _args):
-
- apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
- apt_pipe_value = str(apt_pipe_value).lower()
-
- if apt_pipe_value == "false":
- write_apt_snippet("0", log)
-
- elif apt_pipe_value in ("none", "unchanged", "os"):
- return
-
- elif apt_pipe_value in str(range(0, 6)):
- write_apt_snippet(apt_pipe_value, log)
-
- else:
- log.warn("Invalid option for apt_pipeling: %s" % apt_pipe_value)
-
-
-def write_apt_snippet(setting, log, f_name=default_file):
- """ Writes f_name with apt pipeline depth 'setting' """
-
- acquire_pipeline_depth = 'Acquire::http::Pipeline-Depth "%s";\n'
- file_contents = ("//Written by cloud-init per 'apt_pipelining'\n"
- + (acquire_pipeline_depth % setting))
-
- util.write_file(f_name, file_contents)
-
- log.debug("Wrote %s with APT pipeline setting" % f_name)
diff --git a/cloudinit/CloudConfig/cc_apt_update_upgrade.py b/cloudinit/CloudConfig/cc_apt_update_upgrade.py
deleted file mode 100644
index a7049bce..00000000
--- a/cloudinit/CloudConfig/cc_apt_update_upgrade.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import cloudinit.util as util
-import subprocess
-import traceback
-import os
-import glob
-import cloudinit.CloudConfig as cc
-
-
-def handle(_name, cfg, cloud, log, _args):
- update = util.get_cfg_option_bool(cfg, 'apt_update', False)
- upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)
-
- release = get_release()
-
- mirror = find_apt_mirror(cloud, cfg)
-
- log.debug("selected mirror at: %s" % mirror)
-
- if not util.get_cfg_option_bool(cfg, \
- 'apt_preserve_sources_list', False):
- generate_sources_list(release, mirror)
- old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror', \
- "archive.ubuntu.com/ubuntu")
- rename_apt_lists(old_mir, mirror)
-
- # set up proxy
- proxy = cfg.get("apt_proxy", None)
- proxy_filename = "/etc/apt/apt.conf.d/95cloud-init-proxy"
- if proxy:
- try:
- contents = "Acquire::HTTP::Proxy \"%s\";\n"
- with open(proxy_filename, "w") as fp:
- fp.write(contents % proxy)
- except Exception as e:
- log.warn("Failed to write proxy to %s" % proxy_filename)
- elif os.path.isfile(proxy_filename):
- os.unlink(proxy_filename)
-
- # process 'apt_sources'
- if 'apt_sources' in cfg:
- errors = add_sources(cfg['apt_sources'],
- {'MIRROR': mirror, 'RELEASE': release})
- for e in errors:
- log.warn("Source Error: %s\n" % ':'.join(e))
-
- dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
- if dconf_sel:
- log.debug("setting debconf selections per cloud config")
- try:
- util.subp(('debconf-set-selections', '-'), dconf_sel)
- except:
- log.error("Failed to run debconf-set-selections")
- log.debug(traceback.format_exc())
-
- pkglist = util.get_cfg_option_list_or_str(cfg, 'packages', [])
-
- errors = []
- if update or len(pkglist) or upgrade:
- try:
- cc.update_package_sources()
- except subprocess.CalledProcessError as e:
- log.warn("apt-get update failed")
- log.debug(traceback.format_exc())
- errors.append(e)
-
- if upgrade:
- try:
- cc.apt_get("upgrade")
- except subprocess.CalledProcessError as e:
- log.warn("apt upgrade failed")
- log.debug(traceback.format_exc())
- errors.append(e)
-
- if len(pkglist):
- try:
- cc.install_packages(pkglist)
- except subprocess.CalledProcessError as e:
- log.warn("Failed to install packages: %s " % pkglist)
- log.debug(traceback.format_exc())
- errors.append(e)
-
- if len(errors):
- raise errors[0]
-
- return(True)
-
-
-def mirror2lists_fileprefix(mirror):
- string = mirror
- # take of http:// or ftp://
- if string.endswith("/"):
- string = string[0:-1]
- pos = string.find("://")
- if pos >= 0:
- string = string[pos + 3:]
- string = string.replace("/", "_")
- return string
-
-
-def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"):
- oprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(omirror))
- nprefix = "%s/%s" % (lists_d, mirror2lists_fileprefix(new_mirror))
- if(oprefix == nprefix):
- return
- olen = len(oprefix)
- for filename in glob.glob("%s_*" % oprefix):
- os.rename(filename, "%s%s" % (nprefix, filename[olen:]))
-
-
-def get_release():
- stdout, _stderr = subprocess.Popen(['lsb_release', '-cs'],
- stdout=subprocess.PIPE).communicate()
- return(str(stdout).strip())
-
-
-def generate_sources_list(codename, mirror):
- util.render_to_file('sources.list', '/etc/apt/sources.list', \
- {'mirror': mirror, 'codename': codename})
-
-
-def add_sources(srclist, searchList=None):
- """
- add entries in /etc/apt/sources.list.d for each abbreviated
- sources.list entry in 'srclist'. When rendering template, also
- include the values in dictionary searchList
- """
- if searchList is None:
- searchList = {}
- elst = []
-
- for ent in srclist:
- if 'source' not in ent:
- elst.append(["", "missing source"])
- continue
-
- source = ent['source']
- if source.startswith("ppa:"):
- try:
- util.subp(["add-apt-repository", source])
- except:
- elst.append([source, "add-apt-repository failed"])
- continue
-
- source = util.render_string(source, searchList)
-
- if 'filename' not in ent:
- ent['filename'] = 'cloud_config_sources.list'
-
- if not ent['filename'].startswith("/"):
- ent['filename'] = "%s/%s" % \
- ("/etc/apt/sources.list.d/", ent['filename'])
-
- if ('keyid' in ent and 'key' not in ent):
- ks = "keyserver.ubuntu.com"
- if 'keyserver' in ent:
- ks = ent['keyserver']
- try:
- ent['key'] = util.getkeybyid(ent['keyid'], ks)
- except:
- elst.append([source, "failed to get key from %s" % ks])
- continue
-
- if 'key' in ent:
- try:
- util.subp(('apt-key', 'add', '-'), ent['key'])
- except:
- elst.append([source, "failed add key"])
-
- try:
- util.write_file(ent['filename'], source + "\n", omode="ab")
- except:
- elst.append([source, "failed write to file %s" % ent['filename']])
-
- return(elst)
-
-
-def find_apt_mirror(cloud, cfg):
- """ find an apt_mirror given the cloud and cfg provided """
-
- # TODO: distro and defaults should be configurable
- distro = "ubuntu"
- defaults = {
- 'ubuntu': "http://archive.ubuntu.com/ubuntu",
- 'debian': "http://archive.debian.org/debian",
- }
- mirror = None
-
- cfg_mirror = cfg.get("apt_mirror", None)
- if cfg_mirror:
- mirror = cfg["apt_mirror"]
- elif "apt_mirror_search" in cfg:
- mirror = util.search_for_mirror(cfg['apt_mirror_search'])
- else:
- if cloud:
- mirror = cloud.get_mirror()
-
- mydom = ""
-
- doms = []
-
- if not mirror and cloud:
- # if we have a fqdn, then search its domain portion first
- (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
- mydom = ".".join(fqdn.split(".")[1:])
- if mydom:
- doms.append(".%s" % mydom)
-
- if not mirror:
- doms.extend((".localdomain", "",))
-
- mirror_list = []
- mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
- for post in doms:
- mirror_list.append(mirrorfmt % post)
-
- mirror = util.search_for_mirror(mirror_list)
-
- if not mirror:
- mirror = defaults[distro]
-
- return mirror
diff --git a/cloudinit/CloudConfig/cc_bootcmd.py b/cloudinit/CloudConfig/cc_bootcmd.py
deleted file mode 100644
index f584da02..00000000
--- a/cloudinit/CloudConfig/cc_bootcmd.py
+++ /dev/null
@@ -1,48 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import subprocess
-import tempfile
-import os
-from cloudinit.CloudConfig import per_always
-frequency = per_always
-
-
-def handle(_name, cfg, cloud, log, _args):
- if "bootcmd" not in cfg:
- return
-
- try:
- content = util.shellify(cfg["bootcmd"])
- tmpf = tempfile.TemporaryFile()
- tmpf.write(content)
- tmpf.seek(0)
- except:
- log.warn("failed to shellify bootcmd")
- raise
-
- try:
- env = os.environ.copy()
- env['INSTANCE_ID'] = cloud.get_instance_id()
- subprocess.check_call(['/bin/sh'], env=env, stdin=tmpf)
- tmpf.close()
- except:
- log.warn("failed to run commands from bootcmd")
- raise
diff --git a/cloudinit/CloudConfig/cc_chef.py b/cloudinit/CloudConfig/cc_chef.py
deleted file mode 100644
index 941e04fe..00000000
--- a/cloudinit/CloudConfig/cc_chef.py
+++ /dev/null
@@ -1,119 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Avishai Ish-Shalom <avishai@fewbytes.com>
-# Author: Mike Moulton <mike@meltmedia.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import subprocess
-import json
-import cloudinit.CloudConfig as cc
-import cloudinit.util as util
-
-ruby_version_default = "1.8"
-
-
-def handle(_name, cfg, cloud, log, _args):
- # If there isn't a chef key in the configuration don't do anything
- if 'chef' not in cfg:
- return
- chef_cfg = cfg['chef']
-
- # ensure the chef directories we use exist
- mkdirs(['/etc/chef', '/var/log/chef', '/var/lib/chef',
- '/var/cache/chef', '/var/backups/chef', '/var/run/chef'])
-
- # set the validation key based on the presence of either 'validation_key'
- # or 'validation_cert'. In the case where both exist, 'validation_key'
- # takes precedence
- for key in ('validation_key', 'validation_cert'):
- if key in chef_cfg and chef_cfg[key]:
- with open('/etc/chef/validation.pem', 'w') as validation_key_fh:
- validation_key_fh.write(chef_cfg[key])
- break
-
- # create the chef config from template
- util.render_to_file('chef_client.rb', '/etc/chef/client.rb',
- {'server_url': chef_cfg['server_url'],
- 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name',
- cloud.datasource.get_instance_id()),
- 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
- '_default'),
- 'validation_name': chef_cfg['validation_name']})
-
- # set the firstboot json
- with open('/etc/chef/firstboot.json', 'w') as firstboot_json_fh:
- initial_json = {}
- if 'run_list' in chef_cfg:
- initial_json['run_list'] = chef_cfg['run_list']
- if 'initial_attributes' in chef_cfg:
- initial_attributes = chef_cfg['initial_attributes']
- for k in initial_attributes.keys():
- initial_json[k] = initial_attributes[k]
- firstboot_json_fh.write(json.dumps(initial_json))
-
- # If chef is not installed, we install chef based on 'install_type'
- if not os.path.isfile('/usr/bin/chef-client'):
- install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
- 'packages')
- if install_type == "gems":
- # this will install and run the chef-client from gems
- chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
- ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
- ruby_version_default)
- install_chef_from_gems(ruby_version, chef_version)
- # and finally, run chef-client
- log.debug('running chef-client')
- subprocess.check_call(['/usr/bin/chef-client', '-d', '-i', '1800',
- '-s', '20'])
- else:
- # this will install and run the chef-client from packages
- cc.install_packages(('chef',))
-
-
-def get_ruby_packages(version):
- # return a list of packages needed to install ruby at version
- pkgs = ['ruby%s' % version, 'ruby%s-dev' % version]
- if version == "1.8":
- pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8'))
- return(pkgs)
-
-
-def install_chef_from_gems(ruby_version, chef_version=None):
- cc.install_packages(get_ruby_packages(ruby_version))
- if not os.path.exists('/usr/bin/gem'):
- os.symlink('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
- if not os.path.exists('/usr/bin/ruby'):
- os.symlink('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
- if chef_version:
- subprocess.check_call(['/usr/bin/gem', 'install', 'chef',
- '-v %s' % chef_version, '--no-ri',
- '--no-rdoc', '--bindir', '/usr/bin', '-q'])
- else:
- subprocess.check_call(['/usr/bin/gem', 'install', 'chef',
- '--no-ri', '--no-rdoc', '--bindir',
- '/usr/bin', '-q'])
-
-
-def ensure_dir(d):
- if not os.path.exists(d):
- os.makedirs(d)
-
-
-def mkdirs(dirs):
- for d in dirs:
- ensure_dir(d)
diff --git a/cloudinit/CloudConfig/cc_final_message.py b/cloudinit/CloudConfig/cc_final_message.py
deleted file mode 100644
index abb4ca32..00000000
--- a/cloudinit/CloudConfig/cc_final_message.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.CloudConfig import per_always
-import sys
-from cloudinit import util, boot_finished
-import time
-
-frequency = per_always
-
-final_message = "cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds"
-
-
-def handle(_name, cfg, _cloud, log, args):
- if len(args) != 0:
- msg_in = args[0]
- else:
- msg_in = util.get_cfg_option_str(cfg, "final_message", final_message)
-
- try:
- uptimef = open("/proc/uptime")
- uptime = uptimef.read().split(" ")[0]
- uptimef.close()
- except IOError as e:
- log.warn("unable to open /proc/uptime\n")
- uptime = "na"
-
- try:
- ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
- except:
- ts = "na"
-
- try:
- subs = {'UPTIME': uptime, 'TIMESTAMP': ts}
- sys.stdout.write("%s\n" % util.render_string(msg_in, subs))
- except Exception as e:
- log.warn("failed to render string to stdout: %s" % e)
-
- fp = open(boot_finished, "wb")
- fp.write(uptime + "\n")
- fp.close()
diff --git a/cloudinit/CloudConfig/cc_keys_to_console.py b/cloudinit/CloudConfig/cc_keys_to_console.py
deleted file mode 100644
index 73a477c0..00000000
--- a/cloudinit/CloudConfig/cc_keys_to_console.py
+++ /dev/null
@@ -1,42 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.CloudConfig import per_instance
-import cloudinit.util as util
-import subprocess
-
-frequency = per_instance
-
-
-def handle(_name, cfg, _cloud, log, _args):
- cmd = ['/usr/lib/cloud-init/write-ssh-key-fingerprints']
- fp_blacklist = util.get_cfg_option_list_or_str(cfg,
- "ssh_fp_console_blacklist", [])
- key_blacklist = util.get_cfg_option_list_or_str(cfg,
- "ssh_key_console_blacklist", ["ssh-dss"])
- try:
- confp = open('/dev/console', "wb")
- cmd.append(','.join(fp_blacklist))
- cmd.append(','.join(key_blacklist))
- subprocess.call(cmd, stdout=confp)
- confp.close()
- except:
- log.warn("writing keys to console value")
- raise
diff --git a/cloudinit/CloudConfig/cc_locale.py b/cloudinit/CloudConfig/cc_locale.py
deleted file mode 100644
index 2bb22fdb..00000000
--- a/cloudinit/CloudConfig/cc_locale.py
+++ /dev/null
@@ -1,54 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import cloudinit.util as util
-import os.path
-import subprocess
-import traceback
-
-
-def apply_locale(locale, cfgfile):
- if os.path.exists('/usr/sbin/locale-gen'):
- subprocess.Popen(['locale-gen', locale]).communicate()
- if os.path.exists('/usr/sbin/update-locale'):
- subprocess.Popen(['update-locale', locale]).communicate()
-
- util.render_to_file('default-locale', cfgfile, {'locale': locale})
-
-
-def handle(_name, cfg, cloud, log, args):
- if len(args) != 0:
- locale = args[0]
- else:
- locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
-
- locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile",
- "/etc/default/locale")
-
- if not locale:
- return
-
- log.debug("setting locale to %s" % locale)
-
- try:
- apply_locale(locale, locale_cfgfile)
- except Exception as e:
- log.debug(traceback.format_exc(e))
- raise Exception("failed to apply locale %s" % locale)
diff --git a/cloudinit/CloudConfig/cc_mcollective.py b/cloudinit/CloudConfig/cc_mcollective.py
deleted file mode 100644
index a2a6230c..00000000
--- a/cloudinit/CloudConfig/cc_mcollective.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Marc Cluet <marc.cluet@canonical.com>
-# Based on code by Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import subprocess
-import StringIO
-import ConfigParser
-import cloudinit.CloudConfig as cc
-import cloudinit.util as util
-
-pubcert_file = "/etc/mcollective/ssl/server-public.pem"
-pricert_file = "/etc/mcollective/ssl/server-private.pem"
-
-
-# Our fake header section
-class FakeSecHead(object):
- def __init__(self, fp):
- self.fp = fp
- self.sechead = '[nullsection]\n'
-
- def readline(self):
- if self.sechead:
- try:
- return self.sechead
- finally:
- self.sechead = None
- else:
- return self.fp.readline()
-
-
-def handle(_name, cfg, _cloud, _log, _args):
- # If there isn't a mcollective key in the configuration don't do anything
- if 'mcollective' not in cfg:
- return
- mcollective_cfg = cfg['mcollective']
- # Start by installing the mcollective package ...
- cc.install_packages(("mcollective",))
-
- # ... and then update the mcollective configuration
- if 'conf' in mcollective_cfg:
- # Create object for reading server.cfg values
- mcollective_config = ConfigParser.ConfigParser()
- # Read server.cfg values from original file in order to be able to mix
- # the rest up
- mcollective_config.readfp(FakeSecHead(open('/etc/mcollective/'
- 'server.cfg')))
- for cfg_name, cfg in mcollective_cfg['conf'].iteritems():
- if cfg_name == 'public-cert':
- util.write_file(pubcert_file, cfg, mode=0644)
- mcollective_config.set(cfg_name,
- 'plugin.ssl_server_public', pubcert_file)
- mcollective_config.set(cfg_name, 'securityprovider', 'ssl')
- elif cfg_name == 'private-cert':
- util.write_file(pricert_file, cfg, mode=0600)
- mcollective_config.set(cfg_name,
- 'plugin.ssl_server_private', pricert_file)
- mcollective_config.set(cfg_name, 'securityprovider', 'ssl')
- else:
- # Iterate throug the config items, we'll use ConfigParser.set
- # to overwrite or create new items as needed
- for o, v in cfg.iteritems():
- mcollective_config.set(cfg_name, o, v)
- # We got all our config as wanted we'll rename
- # the previous server.cfg and create our new one
- os.rename('/etc/mcollective/server.cfg',
- '/etc/mcollective/server.cfg.old')
- outputfile = StringIO.StringIO()
- mcollective_config.write(outputfile)
- # Now we got the whole file, write to disk except first line
- # Note below, that we've just used ConfigParser because it generally
- # works. Below, we remove the initial 'nullsection' header
- # and then change 'key = value' to 'key: value'. The global
- # search and replace of '=' with ':' could be problematic though.
- # this most likely needs fixing.
- util.write_file('/etc/mcollective/server.cfg',
- outputfile.getvalue().replace('[nullsection]\n', '').replace(' =',
- ':'),
- mode=0644)
-
- # Start mcollective
- subprocess.check_call(['service', 'mcollective', 'start'])
diff --git a/cloudinit/CloudConfig/cc_puppet.py b/cloudinit/CloudConfig/cc_puppet.py
deleted file mode 100644
index 6fc475f6..00000000
--- a/cloudinit/CloudConfig/cc_puppet.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import os.path
-import pwd
-import socket
-import subprocess
-import StringIO
-import ConfigParser
-import cloudinit.CloudConfig as cc
-import cloudinit.util as util
-
-
-def handle(_name, cfg, cloud, log, _args):
- # If there isn't a puppet key in the configuration don't do anything
- if 'puppet' not in cfg:
- return
- puppet_cfg = cfg['puppet']
- # Start by installing the puppet package ...
- cc.install_packages(("puppet",))
-
- # ... and then update the puppet configuration
- if 'conf' in puppet_cfg:
- # Add all sections from the conf object to puppet.conf
- puppet_conf_fh = open('/etc/puppet/puppet.conf', 'r')
- # Create object for reading puppet.conf values
- puppet_config = ConfigParser.ConfigParser()
- # Read puppet.conf values from original file in order to be able to
- # mix the rest up
- puppet_config.readfp(StringIO.StringIO(''.join(i.lstrip() for i in
- puppet_conf_fh.readlines())))
- # Close original file, no longer needed
- puppet_conf_fh.close()
- for cfg_name, cfg in puppet_cfg['conf'].iteritems():
- # ca_cert configuration is a special case
- # Dump the puppetmaster ca certificate in the correct place
- if cfg_name == 'ca_cert':
- # Puppet ssl sub-directory isn't created yet
- # Create it with the proper permissions and ownership
- os.makedirs('/var/lib/puppet/ssl')
- os.chmod('/var/lib/puppet/ssl', 0771)
- os.chown('/var/lib/puppet/ssl',
- pwd.getpwnam('puppet').pw_uid, 0)
- os.makedirs('/var/lib/puppet/ssl/certs/')
- os.chown('/var/lib/puppet/ssl/certs/',
- pwd.getpwnam('puppet').pw_uid, 0)
- ca_fh = open('/var/lib/puppet/ssl/certs/ca.pem', 'w')
- ca_fh.write(cfg)
- ca_fh.close()
- os.chown('/var/lib/puppet/ssl/certs/ca.pem',
- pwd.getpwnam('puppet').pw_uid, 0)
- util.restorecon_if_possible('/var/lib/puppet', recursive=True)
- else:
- #puppet_conf_fh.write("\n[%s]\n" % (cfg_name))
- # If puppet.conf already has this section we don't want to
- # write it again
- if puppet_config.has_section(cfg_name) == False:
- puppet_config.add_section(cfg_name)
- # Iterate throug the config items, we'll use ConfigParser.set
- # to overwrite or create new items as needed
- for o, v in cfg.iteritems():
- if o == 'certname':
- # Expand %f as the fqdn
- v = v.replace("%f", socket.getfqdn())
- # Expand %i as the instance id
- v = v.replace("%i",
- cloud.datasource.get_instance_id())
- # certname needs to be downcase
- v = v.lower()
- puppet_config.set(cfg_name, o, v)
- #puppet_conf_fh.write("%s=%s\n" % (o, v))
- # We got all our config as wanted we'll rename
- # the previous puppet.conf and create our new one
- os.rename('/etc/puppet/puppet.conf', '/etc/puppet/puppet.conf.old')
- with open('/etc/puppet/puppet.conf', 'wb') as configfile:
- puppet_config.write(configfile)
- util.restorecon_if_possible('/etc/puppet/puppet.conf')
- # Set puppet to automatically start
- if os.path.exists('/etc/default/puppet'):
- subprocess.check_call(['sed', '-i',
- '-e', 's/^START=.*/START=yes/',
- '/etc/default/puppet'])
- elif os.path.exists('/bin/systemctl'):
- subprocess.check_call(['/bin/systemctl', 'enable', 'puppet.service'])
- elif os.path.exists('/sbin/chkconfig'):
- subprocess.check_call(['/sbin/chkconfig', 'puppet', 'on'])
- else:
- log.warn("Do not know how to enable puppet service on this system")
- # Start puppetd
- subprocess.check_call(['service', 'puppet', 'start'])
diff --git a/cloudinit/CloudConfig/cc_resizefs.py b/cloudinit/CloudConfig/cc_resizefs.py
deleted file mode 100644
index 2dc66def..00000000
--- a/cloudinit/CloudConfig/cc_resizefs.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import cloudinit.util as util
-import subprocess
-import os
-import stat
-import sys
-import time
-import tempfile
-from cloudinit.CloudConfig import per_always
-
-frequency = per_always
-
-
-def handle(_name, cfg, _cloud, log, args):
- if len(args) != 0:
- resize_root = False
- if str(args[0]).lower() in ['true', '1', 'on', 'yes']:
- resize_root = True
- else:
- resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
-
- if str(resize_root).lower() in ['false', '0']:
- return
-
- # we use mktemp rather than mkstemp because early in boot nothing
- # else should be able to race us for this, and we need to mknod.
- devpth = tempfile.mktemp(prefix="cloudinit.resizefs.", dir="/run")
-
- try:
- st_dev = os.stat("/").st_dev
- dev = os.makedev(os.major(st_dev), os.minor(st_dev))
- os.mknod(devpth, 0400 | stat.S_IFBLK, dev)
- except:
- if util.is_container():
- log.debug("inside container, ignoring mknod failure in resizefs")
- return
- log.warn("Failed to make device node to resize /")
- raise
-
- cmd = ['blkid', '-c', '/dev/null', '-sTYPE', '-ovalue', devpth]
- try:
- (fstype, _err) = util.subp(cmd)
- except subprocess.CalledProcessError as e:
- log.warn("Failed to get filesystem type of maj=%s, min=%s via: %s" %
- (os.major(st_dev), os.minor(st_dev), cmd))
- log.warn("output=%s\nerror=%s\n", e.output[0], e.output[1])
- os.unlink(devpth)
- raise
-
- if str(fstype).startswith("ext"):
- resize_cmd = ['resize2fs', devpth]
- elif fstype == "xfs":
- resize_cmd = ['xfs_growfs', devpth]
- else:
- os.unlink(devpth)
- log.debug("not resizing unknown filesystem %s" % fstype)
- return
-
- if resize_root == "noblock":
- fid = os.fork()
- if fid == 0:
- try:
- do_resize(resize_cmd, devpth, log)
- os._exit(0) # pylint: disable=W0212
- except Exception as exc:
- sys.stderr.write("Failed: %s" % exc)
- os._exit(1) # pylint: disable=W0212
- else:
- do_resize(resize_cmd, devpth, log)
-
- log.debug("resizing root filesystem (type=%s, maj=%i, min=%i, val=%s)" %
- (str(fstype).rstrip("\n"), os.major(st_dev), os.minor(st_dev),
- resize_root))
-
- return
-
-
-def do_resize(resize_cmd, devpth, log):
- try:
- start = time.time()
- util.subp(resize_cmd)
- except subprocess.CalledProcessError as e:
- log.warn("Failed to resize filesystem (%s)" % resize_cmd)
- log.warn("output=%s\nerror=%s\n", e.output[0], e.output[1])
- os.unlink(devpth)
- raise
-
- os.unlink(devpth)
- log.debug("resize took %s seconds" % (time.time() - start))
diff --git a/cloudinit/CloudConfig/cc_rightscale_userdata.py b/cloudinit/CloudConfig/cc_rightscale_userdata.py
deleted file mode 100644
index 5ed0848f..00000000
--- a/cloudinit/CloudConfig/cc_rightscale_userdata.py
+++ /dev/null
@@ -1,78 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-##
-## The purpose of this script is to allow cloud-init to consume
-## rightscale style userdata. rightscale user data is key-value pairs
-## in a url-query-string like format.
-##
-## for cloud-init support, there will be a key named
-## 'CLOUD_INIT_REMOTE_HOOK'.
-##
-## This cloud-config module will
-## - read the blob of data from raw user data, and parse it as key/value
-## - for each key that is found, download the content to
-## the local instance/scripts directory and set them executable.
-## - the files in that directory will be run by the user-scripts module
-## Therefore, this must run before that.
-##
-##
-
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_instance
-from cloudinit import get_ipath_cur
-from urlparse import parse_qs
-
-frequency = per_instance
-my_name = "cc_rightscale_userdata"
-my_hookname = 'CLOUD_INIT_REMOTE_HOOK'
-
-
-def handle(_name, _cfg, cloud, log, _args):
- try:
- ud = cloud.get_userdata_raw()
- except:
- log.warn("failed to get raw userdata in %s" % my_name)
- return
-
- try:
- mdict = parse_qs(ud)
- if not my_hookname in mdict:
- return
- except:
- log.warn("failed to urlparse.parse_qa(userdata_raw())")
- raise
-
- scripts_d = get_ipath_cur('scripts')
- i = 0
- first_e = None
- for url in mdict[my_hookname]:
- fname = "%s/rightscale-%02i" % (scripts_d, i)
- i = i + 1
- try:
- content = util.readurl(url)
- util.write_file(fname, content, mode=0700)
- except Exception as e:
- if not first_e:
- first_e = None
- log.warn("%s failed to read %s: %s" % (my_name, url, e))
-
- if first_e:
- raise(e)
diff --git a/cloudinit/CloudConfig/cc_ssh.py b/cloudinit/CloudConfig/cc_ssh.py
deleted file mode 100644
index 48eb58bc..00000000
--- a/cloudinit/CloudConfig/cc_ssh.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import cloudinit.util as util
-import cloudinit.SshUtil as sshutil
-import os
-import glob
-import subprocess
-
-DISABLE_ROOT_OPTS = "no-port-forwarding,no-agent-forwarding," \
-"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" " \
-"rather than the user \\\"root\\\".\';echo;sleep 10\""
-
-
-def handle(_name, cfg, cloud, log, _args):
-
- # remove the static keys from the pristine image
- if cfg.get("ssh_deletekeys", True):
- for f in glob.glob("/etc/ssh/ssh_host_*key*"):
- try:
- os.unlink(f)
- except:
- pass
-
- if "ssh_keys" in cfg:
- # if there are keys in cloud-config, use them
- key2file = {
- "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0600),
- "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
- "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0600),
- "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0644),
- "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0600),
- "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644),
- }
-
- for key, val in cfg["ssh_keys"].items():
- if key in key2file:
- util.write_file(key2file[key][0], val, key2file[key][1])
-
- priv2pub = {'rsa_private': 'rsa_public', 'dsa_private': 'dsa_public',
- 'ecdsa_private': 'ecdsa_public', }
-
- cmd = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
- for priv, pub in priv2pub.iteritems():
- if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']:
- continue
- pair = (key2file[priv][0], key2file[pub][0])
- subprocess.call(('sh', '-xc', cmd % pair))
- log.debug("generated %s from %s" % pair)
- else:
- # if not, generate them
- for keytype in util.get_cfg_option_list_or_str(cfg, 'ssh_genkeytypes',
- ['rsa', 'dsa', 'ecdsa']):
- keyfile = '/etc/ssh/ssh_host_%s_key' % keytype
- if not os.path.exists(keyfile):
- subprocess.call(['ssh-keygen', '-t', keytype, '-N', '',
- '-f', keyfile])
-
- util.restorecon_if_possible('/etc/ssh', recursive=True)
-
- try:
- user = util.get_cfg_option_str(cfg, 'user')
- disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
- disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
- DISABLE_ROOT_OPTS)
- keys = cloud.get_public_ssh_keys()
-
- if "ssh_authorized_keys" in cfg:
- cfgkeys = cfg["ssh_authorized_keys"]
- keys.extend(cfgkeys)
-
- apply_credentials(keys, user, disable_root, disable_root_opts, log)
- except:
- util.logexc(log)
- log.warn("applying credentials failed!\n")
-
-
-def apply_credentials(keys, user, disable_root,
- disable_root_opts=DISABLE_ROOT_OPTS, log=None):
- keys = set(keys)
- if user:
- sshutil.setup_user_keys(keys, user, '', log)
-
- if disable_root:
- key_prefix = disable_root_opts.replace('$USER', user)
- else:
- key_prefix = ''
-
- sshutil.setup_user_keys(keys, 'root', key_prefix, log)
diff --git a/cloudinit/CloudConfig/cc_timezone.py b/cloudinit/CloudConfig/cc_timezone.py
deleted file mode 100644
index e5c9901b..00000000
--- a/cloudinit/CloudConfig/cc_timezone.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit.CloudConfig import per_instance
-from cloudinit import util
-import os.path
-import shutil
-
-frequency = per_instance
-tz_base = "/usr/share/zoneinfo"
-
-
-def handle(_name, cfg, _cloud, log, args):
- if len(args) != 0:
- timezone = args[0]
- else:
- timezone = util.get_cfg_option_str(cfg, "timezone", False)
-
- if not timezone:
- return
-
- tz_file = "%s/%s" % (tz_base, timezone)
-
- if not os.path.isfile(tz_file):
- log.debug("Invalid timezone %s" % tz_file)
- raise Exception("Invalid timezone %s" % tz_file)
-
- try:
- fp = open("/etc/timezone", "wb")
- fp.write("%s\n" % timezone)
- fp.close()
- except:
- log.debug("failed to write to /etc/timezone")
- raise
- if os.path.exists("/etc/sysconfig/clock"):
- try:
- with open("/etc/sysconfig/clock", "w") as fp:
- fp.write('ZONE="%s"\n' % timezone)
- except:
- log.debug("failed to write to /etc/sysconfig/clock")
- raise
-
- try:
- shutil.copy(tz_file, "/etc/localtime")
- except:
- log.debug("failed to copy %s to /etc/localtime" % tz_file)
- raise
-
- log.debug("set timezone to %s" % timezone)
- return
diff --git a/cloudinit/CloudConfig/cc_update_etc_hosts.py b/cloudinit/CloudConfig/cc_update_etc_hosts.py
deleted file mode 100644
index 6ad2fca8..00000000
--- a/cloudinit/CloudConfig/cc_update_etc_hosts.py
+++ /dev/null
@@ -1,87 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_always
-import StringIO
-
-frequency = per_always
-
-
-def handle(_name, cfg, cloud, log, _args):
- (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
-
- manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
- if manage_hosts in ("True", "true", True, "template"):
- # render from template file
- try:
- if not hostname:
- log.info("manage_etc_hosts was set, but no hostname found")
- return
-
- util.render_to_file('hosts', '/etc/hosts',
- {'hostname': hostname, 'fqdn': fqdn})
- except Exception:
- log.warn("failed to update /etc/hosts")
- raise
- elif manage_hosts == "localhost":
- log.debug("managing 127.0.1.1 in /etc/hosts")
- update_etc_hosts(hostname, fqdn, log)
- return
- else:
- if manage_hosts not in ("False", False):
- log.warn("Unknown value for manage_etc_hosts. Assuming False")
- else:
- log.debug("not managing /etc/hosts")
-
-
-def update_etc_hosts(hostname, fqdn, _log):
- with open('/etc/hosts', 'r') as etchosts:
- header = "# Added by cloud-init\n"
- hosts_line = "127.0.1.1\t%s %s\n" % (fqdn, hostname)
- need_write = False
- need_change = True
- new_etchosts = StringIO.StringIO()
- for line in etchosts:
- split_line = [s.strip() for s in line.split()]
- if len(split_line) < 2:
- new_etchosts.write(line)
- continue
- if line == header:
- continue
- ip, hosts = split_line[0], split_line[1:]
- if ip == "127.0.1.1":
- if sorted([hostname, fqdn]) == sorted(hosts):
- need_change = False
- if need_change == True:
- line = "%s%s" % (header, hosts_line)
- need_change = False
- need_write = True
- new_etchosts.write(line)
- etchosts.close()
- if need_change == True:
- new_etchosts.write("%s%s" % (header, hosts_line))
- need_write = True
- if need_write == True:
- new_etcfile = open('/etc/hosts', 'wb')
- new_etcfile.write(new_etchosts.getvalue())
- new_etcfile.close()
- new_etchosts.close()
- return
diff --git a/cloudinit/CloudConfig/cc_update_hostname.py b/cloudinit/CloudConfig/cc_update_hostname.py
deleted file mode 100644
index b9d1919a..00000000
--- a/cloudinit/CloudConfig/cc_update_hostname.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2011 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Haefliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import cloudinit.util as util
-import subprocess
-import errno
-from cloudinit.CloudConfig import per_always
-
-frequency = per_always
-
-
-def handle(_name, cfg, cloud, log, _args):
- if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug("preserve_hostname is set. not updating hostname")
- return
-
- (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
- try:
- prev = "%s/%s" % (cloud.get_cpath('data'), "previous-hostname")
- update_hostname(hostname, prev, log)
- except Exception:
- log.warn("failed to set hostname\n")
- raise
-
-
-# read hostname from a 'hostname' file
-# allow for comments and stripping line endings.
-# if file doesn't exist, or no contents, return default
-def read_hostname(filename, default=None):
- try:
- fp = open(filename, "r")
- lines = fp.readlines()
- fp.close()
- for line in lines:
- hpos = line.find("#")
- if hpos != -1:
- line = line[0:hpos]
- line = line.rstrip()
- if line:
- return line
- except IOError as e:
- if e.errno != errno.ENOENT:
- raise
- return default
-
-
-def update_hostname(hostname, prev_file, log):
- etc_file = "/etc/hostname"
-
- hostname_prev = None
- hostname_in_etc = None
-
- try:
- hostname_prev = read_hostname(prev_file)
- except Exception as e:
- log.warn("Failed to open %s: %s" % (prev_file, e))
-
- try:
- hostname_in_etc = read_hostname(etc_file)
- except:
- log.warn("Failed to open %s" % etc_file)
-
- update_files = []
- if not hostname_prev or hostname_prev != hostname:
- update_files.append(prev_file)
-
- if (not hostname_in_etc or
- (hostname_in_etc == hostname_prev and hostname_in_etc != hostname)):
- update_files.append(etc_file)
-
- try:
- for fname in update_files:
- util.write_file(fname, "%s\n" % hostname, 0644)
- log.debug("wrote %s to %s" % (hostname, fname))
- except:
- log.warn("failed to write hostname to %s" % fname)
-
- if hostname_in_etc and hostname_prev and hostname_in_etc != hostname_prev:
- log.debug("%s differs from %s. assuming user maintained" %
- (prev_file, etc_file))
-
- if etc_file in update_files:
- log.debug("setting hostname to %s" % hostname)
- subprocess.Popen(['hostname', hostname]).communicate()
diff --git a/cloudinit/DataSource.py b/cloudinit/DataSource.py
deleted file mode 100644
index e2a9150d..00000000
--- a/cloudinit/DataSource.py
+++ /dev/null
@@ -1,214 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-
-DEP_FILESYSTEM = "FILESYSTEM"
-DEP_NETWORK = "NETWORK"
-
-import cloudinit.UserDataHandler as ud
-import cloudinit.util as util
-import socket
-
-
-class DataSource:
- userdata = None
- metadata = None
- userdata_raw = None
- cfgname = ""
- # system config (passed in from cloudinit,
- # cloud-config before input from the DataSource)
- sys_cfg = {}
- # datasource config, the cloud-config['datasource']['__name__']
- ds_cfg = {} # datasource config
-
- def __init__(self, sys_cfg=None):
- if not self.cfgname:
- name = str(self.__class__).split(".")[-1]
- if name.startswith("DataSource"):
- name = name[len("DataSource"):]
- self.cfgname = name
- if sys_cfg:
- self.sys_cfg = sys_cfg
-
- self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
- ("datasource", self.cfgname), self.ds_cfg)
-
- def get_userdata(self):
- if self.userdata == None:
- self.userdata = ud.preprocess_userdata(self.userdata_raw)
- return self.userdata
-
- def get_userdata_raw(self):
- return(self.userdata_raw)
-
- # the data sources' config_obj is a cloud-config formated
- # object that came to it from ways other than cloud-config
- # because cloud-config content would be handled elsewhere
- def get_config_obj(self):
- return({})
-
- def get_public_ssh_keys(self):
- keys = []
- if 'public-keys' not in self.metadata:
- return([])
-
- if isinstance(self.metadata['public-keys'], str):
- return(str(self.metadata['public-keys']).splitlines())
-
- if isinstance(self.metadata['public-keys'], list):
- return(self.metadata['public-keys'])
-
- for _keyname, klist in self.metadata['public-keys'].items():
- # lp:506332 uec metadata service responds with
- # data that makes boto populate a string for 'klist' rather
- # than a list.
- if isinstance(klist, str):
- klist = [klist]
- for pkey in klist:
- # there is an empty string at the end of the keylist, trim it
- if pkey:
- keys.append(pkey)
-
- return(keys)
-
- def device_name_to_device(self, _name):
- # translate a 'name' to a device
- # the primary function at this point is on ec2
- # to consult metadata service, that has
- # ephemeral0: sdb
- # and return 'sdb' for input 'ephemeral0'
- return(None)
-
- def get_locale(self):
- return('en_US.UTF-8')
-
- def get_local_mirror(self):
- return None
-
- def get_instance_id(self):
- if 'instance-id' not in self.metadata:
- return "iid-datasource"
- return(self.metadata['instance-id'])
-
- def get_hostname(self, fqdn=False):
- defdomain = "localdomain"
- defhost = "localhost"
-
- domain = defdomain
- if not 'local-hostname' in self.metadata:
-
- # this is somewhat questionable really.
- # the cloud datasource was asked for a hostname
- # and didn't have one. raising error might be more appropriate
- # but instead, basically look up the existing hostname
- toks = []
-
- hostname = socket.gethostname()
-
- fqdn = util.get_fqdn_from_hosts(hostname)
-
- if fqdn and fqdn.find(".") > 0:
- toks = str(fqdn).split(".")
- elif hostname:
- toks = [hostname, defdomain]
- else:
- toks = [defhost, defdomain]
-
- else:
- # if there is an ipv4 address in 'local-hostname', then
- # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
- lhost = self.metadata['local-hostname']
- if is_ipv4(lhost):
- toks = "ip-%s" % lhost.replace(".", "-")
- else:
- toks = lhost.split(".")
-
- if len(toks) > 1:
- hostname = toks[0]
- domain = '.'.join(toks[1:])
- else:
- hostname = toks[0]
-
- if fqdn:
- return "%s.%s" % (hostname, domain)
- else:
- return hostname
-
-
-# return a list of classes that have the same depends as 'depends'
-# iterate through cfg_list, loading "DataSourceCollections" modules
-# and calling their "get_datasource_list".
-# return an ordered list of classes that match
-#
-# - modules must be named "DataSource<item>", where 'item' is an entry
-# in cfg_list
-# - if pkglist is given, it will iterate try loading from that package
-# ie, pkglist=[ "foo", "" ]
-# will first try to load foo.DataSource<item>
-# then DataSource<item>
-def list_sources(cfg_list, depends, pkglist=None):
- if pkglist is None:
- pkglist = []
- retlist = []
- for ds_coll in cfg_list:
- for pkg in pkglist:
- if pkg:
- pkg = "%s." % pkg
- try:
- mod = __import__("%sDataSource%s" % (pkg, ds_coll))
- if pkg:
- mod = getattr(mod, "DataSource%s" % ds_coll)
- lister = getattr(mod, "get_datasource_list")
- retlist.extend(lister(depends))
- break
- except:
- raise
- return(retlist)
-
-
-# depends is a list of dependencies (DEP_FILESYSTEM)
-# dslist is a list of 2 item lists
-# dslist = [
-# ( class, ( depends-that-this-class-needs ) )
-# }
-# it returns a list of 'class' that matched these deps exactly
-# it is a helper function for DataSourceCollections
-def list_from_depends(depends, dslist):
- retlist = []
- depset = set(depends)
- for elem in dslist:
- (cls, deps) = elem
- if depset == set(deps):
- retlist.append(cls)
- return(retlist)
-
-
-def is_ipv4(instr):
- """ determine if input string is a ipv4 address. return boolean"""
- toks = instr.split('.')
- if len(toks) != 4:
- return False
-
- try:
- toks = [x for x in toks if (int(x) < 256 and int(x) > 0)]
- except:
- return False
-
- return (len(toks) == 4)
diff --git a/cloudinit/DataSourceCloudStack.py b/cloudinit/DataSourceCloudStack.py
deleted file mode 100644
index 5afdf7b6..00000000
--- a/cloudinit/DataSourceCloudStack.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Cosmin Luta
-#
-# Author: Cosmin Luta <q4break@gmail.com>
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import cloudinit.DataSource as DataSource
-
-from cloudinit import seeddir as base_seeddir
-from cloudinit import log
-import cloudinit.util as util
-from socket import inet_ntoa
-import time
-import boto.utils as boto_utils
-from struct import pack
-
-
-class DataSourceCloudStack(DataSource.DataSource):
- api_ver = 'latest'
- seeddir = base_seeddir + '/cs'
- metadata_address = None
-
- def __init__(self, sys_cfg=None):
- DataSource.DataSource.__init__(self, sys_cfg)
- # Cloudstack has its metadata/userdata URLs located at
- # http://<default-gateway-ip>/latest/
- self.metadata_address = "http://%s/" % self.get_default_gateway()
-
- def get_default_gateway(self):
- """ Returns the default gateway ip address in the dotted format
- """
- with open("/proc/net/route", "r") as f:
- for line in f.readlines():
- items = line.split("\t")
- if items[1] == "00000000":
- # found the default route, get the gateway
- gw = inet_ntoa(pack("<L", int(items[2], 16)))
- log.debug("found default route, gateway is %s" % gw)
- return gw
-
- def __str__(self):
- return "DataSourceCloudStack"
-
- def get_data(self):
- seedret = {}
- if util.read_optional_seed(seedret, base=self.seeddir + "/"):
- self.userdata_raw = seedret['user-data']
- self.metadata = seedret['meta-data']
- log.debug("using seeded cs data in %s" % self.seeddir)
- return True
-
- try:
- start = time.time()
- self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
- None, self.metadata_address)
- self.metadata = boto_utils.get_instance_metadata(self.api_ver,
- self.metadata_address)
- log.debug("crawl of metadata service took %ds" %
- (time.time() - start))
- return True
- except Exception as e:
- log.exception(e)
- return False
-
- def get_instance_id(self):
- return self.metadata['instance-id']
-
- def get_availability_zone(self):
- return self.metadata['availability-zone']
-
-datasources = [
- (DataSourceCloudStack, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
-]
-
-
-# return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return DataSource.list_from_depends(depends, datasources)
diff --git a/cloudinit/DataSourceConfigDrive.py b/cloudinit/DataSourceConfigDrive.py
deleted file mode 100644
index 2db4a76a..00000000
--- a/cloudinit/DataSourceConfigDrive.py
+++ /dev/null
@@ -1,231 +0,0 @@
-# Copyright (C) 2012 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import cloudinit.DataSource as DataSource
-
-from cloudinit import seeddir as base_seeddir
-from cloudinit import log
-import cloudinit.util as util
-import os.path
-import os
-import json
-import subprocess
-
-DEFAULT_IID = "iid-dsconfigdrive"
-
-
-class DataSourceConfigDrive(DataSource.DataSource):
- seed = None
- seeddir = base_seeddir + '/config_drive'
- cfg = {}
- userdata_raw = None
- metadata = None
- dsmode = "local"
-
- def __str__(self):
- mstr = "DataSourceConfigDrive[%s]" % self.dsmode
- mstr = mstr + " [seed=%s]" % self.seed
- return(mstr)
-
- def get_data(self):
- found = None
- md = {}
- ud = ""
-
- defaults = {"instance-id": DEFAULT_IID, "dsmode": "pass"}
-
- if os.path.isdir(self.seeddir):
- try:
- (md, ud) = read_config_drive_dir(self.seeddir)
- found = self.seeddir
- except nonConfigDriveDir:
- pass
-
- if not found:
- dev = cfg_drive_device()
- if dev:
- try:
- (md, ud) = util.mount_callback_umount(dev,
- read_config_drive_dir)
- found = dev
- except (nonConfigDriveDir, util.mountFailedError):
- pass
-
- if not found:
- return False
-
- if 'dsconfig' in md:
- self.cfg = md['dscfg']
-
- md = util.mergedict(md, defaults)
-
- # update interfaces and ifup only on the local datasource
- # this way the DataSourceConfigDriveNet doesn't do it also.
- if 'network-interfaces' in md and self.dsmode == "local":
- if md['dsmode'] == "pass":
- log.info("updating network interfaces from configdrive")
- else:
- log.debug("updating network interfaces from configdrive")
-
- util.write_file("/etc/network/interfaces",
- md['network-interfaces'])
- try:
- (out, err) = util.subp(['ifup', '--all'])
- if len(out) or len(err):
- log.warn("ifup --all had stderr: %s" % err)
-
- except subprocess.CalledProcessError as exc:
- log.warn("ifup --all failed: %s" % (exc.output[1]))
-
- self.seed = found
- self.metadata = md
- self.userdata_raw = ud
-
- if md['dsmode'] == self.dsmode:
- return True
-
- log.debug("%s: not claiming datasource, dsmode=%s" %
- (self, md['dsmode']))
- return False
-
- def get_public_ssh_keys(self):
- if not 'public-keys' in self.metadata:
- return([])
- return(self.metadata['public-keys'])
-
- # the data sources' config_obj is a cloud-config formated
- # object that came to it from ways other than cloud-config
- # because cloud-config content would be handled elsewhere
- def get_config_obj(self):
- return(self.cfg)
-
-
-class DataSourceConfigDriveNet(DataSourceConfigDrive):
- dsmode = "net"
-
-
-class nonConfigDriveDir(Exception):
- pass
-
-
-def cfg_drive_device():
- """ get the config drive device. return a string like '/dev/vdb'
- or None (if there is no non-root device attached). This does not
- check the contents, only reports that if there *were* a config_drive
- attached, it would be this device.
- per config_drive documentation, this is
- "associated as the last available disk on the instance"
- """
-
- if 'CLOUD_INIT_CONFIG_DRIVE_DEVICE' in os.environ:
- return(os.environ['CLOUD_INIT_CONFIG_DRIVE_DEVICE'])
-
- # we are looking for a raw block device (sda, not sda1) with a vfat
- # filesystem on it.
-
- letters = "abcdefghijklmnopqrstuvwxyz"
- devs = util.find_devs_with("TYPE=vfat")
-
- # filter out anything not ending in a letter (ignore partitions)
- devs = [f for f in devs if f[-1] in letters]
-
- # sort them in reverse so "last" device is first
- devs.sort(reverse=True)
-
- if len(devs):
- return(devs[0])
-
- return(None)
-
-
-def read_config_drive_dir(source_dir):
- """
- read_config_drive_dir(source_dir):
- read source_dir, and return a tuple with metadata dict and user-data
- string populated. If not a valid dir, raise a nonConfigDriveDir
- """
- md = {}
- ud = ""
-
- flist = ("etc/network/interfaces", "root/.ssh/authorized_keys", "meta.js")
- found = [f for f in flist if os.path.isfile("%s/%s" % (source_dir, f))]
- keydata = ""
-
- if len(found) == 0:
- raise nonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
-
- if "etc/network/interfaces" in found:
- with open("%s/%s" % (source_dir, "/etc/network/interfaces")) as fp:
- md['network-interfaces'] = fp.read()
-
- if "root/.ssh/authorized_keys" in found:
- with open("%s/%s" % (source_dir, "root/.ssh/authorized_keys")) as fp:
- keydata = fp.read()
-
- meta_js = {}
-
- if "meta.js" in found:
- content = ''
- with open("%s/%s" % (source_dir, "meta.js")) as fp:
- content = fp.read()
- md['meta_js'] = content
- try:
- meta_js = json.loads(content)
- except ValueError:
- raise nonConfigDriveDir("%s: %s" %
- (source_dir, "invalid json in meta.js"))
-
- keydata = meta_js.get('public-keys', keydata)
-
- if keydata:
- lines = keydata.splitlines()
- md['public-keys'] = [l for l in lines
- if len(l) and not l.startswith("#")]
-
- for copy in ('dsmode', 'instance-id', 'dscfg'):
- if copy in meta_js:
- md[copy] = meta_js[copy]
-
- if 'user-data' in meta_js:
- ud = meta_js['user-data']
-
- return(md, ud)
-
-datasources = (
- (DataSourceConfigDrive, (DataSource.DEP_FILESYSTEM, )),
- (DataSourceConfigDriveNet,
- (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
-)
-
-
-# return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return(DataSource.list_from_depends(depends, datasources))
-
-if __name__ == "__main__":
- def main():
- import sys
- import pprint
- print cfg_drive_device()
- (md, ud) = read_config_drive_dir(sys.argv[1])
- print "=== md ==="
- pprint.pprint(md)
- print "=== ud ==="
- print(ud)
-
- main()
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/DataSourceEc2.py b/cloudinit/DataSourceEc2.py
deleted file mode 100644
index 7051ecda..00000000
--- a/cloudinit/DataSourceEc2.py
+++ /dev/null
@@ -1,217 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import cloudinit.DataSource as DataSource
-
-from cloudinit import seeddir as base_seeddir
-from cloudinit import log
-import cloudinit.util as util
-import socket
-import time
-import boto.utils as boto_utils
-import os.path
-
-
-class DataSourceEc2(DataSource.DataSource):
- api_ver = '2009-04-04'
- seeddir = base_seeddir + '/ec2'
- metadata_address = "http://169.254.169.254"
-
- def __str__(self):
- return("DataSourceEc2")
-
- def get_data(self):
- seedret = {}
- if util.read_optional_seed(seedret, base=self.seeddir + "/"):
- self.userdata_raw = seedret['user-data']
- self.metadata = seedret['meta-data']
- log.debug("using seeded ec2 data in %s" % self.seeddir)
- return True
-
- try:
- if not self.wait_for_metadata_service():
- return False
- start = time.time()
- self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
- None, self.metadata_address)
- self.metadata = boto_utils.get_instance_metadata(self.api_ver,
- self.metadata_address)
- log.debug("crawl of metadata service took %ds" % (time.time() -
- start))
- return True
- except Exception as e:
- print e
- return False
-
- def get_instance_id(self):
- return(self.metadata['instance-id'])
-
- def get_availability_zone(self):
- return(self.metadata['placement']['availability-zone'])
-
- def get_local_mirror(self):
- return(self.get_mirror_from_availability_zone())
-
- def get_mirror_from_availability_zone(self, availability_zone=None):
- # availability is like 'us-west-1b' or 'eu-west-1a'
- if availability_zone == None:
- availability_zone = self.get_availability_zone()
-
- fallback = None
-
- if self.is_vpc():
- return fallback
-
- try:
- host = "%s.ec2.archive.ubuntu.com" % availability_zone[:-1]
- socket.getaddrinfo(host, None, 0, socket.SOCK_STREAM)
- return 'http://%s/ubuntu/' % host
- except:
- return fallback
-
- def wait_for_metadata_service(self):
- mcfg = self.ds_cfg
-
- if not hasattr(mcfg, "get"):
- mcfg = {}
-
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(log)
- log.warn("Failed to get max wait. using %s" % max_wait)
-
- if max_wait == 0:
- return False
-
- timeout = 50
- try:
- timeout = int(mcfg.get("timeout", timeout))
- except Exception:
- util.logexc(log)
- log.warn("Failed to get timeout, using %s" % timeout)
-
- def_mdurls = ["http://169.254.169.254", "http://instance-data:8773"]
- mdurls = mcfg.get("metadata_urls", def_mdurls)
-
- # Remove addresses from the list that wont resolve.
- filtered = [x for x in mdurls if util.is_resolvable_url(x)]
-
- if set(filtered) != set(mdurls):
- log.debug("removed the following from metadata urls: %s" %
- list((set(mdurls) - set(filtered))))
-
- if len(filtered):
- mdurls = filtered
- else:
- log.warn("Empty metadata url list! using default list")
- mdurls = def_mdurls
-
- urls = []
- url2base = {False: False}
- for url in mdurls:
- cur = "%s/%s/meta-data/instance-id" % (url, self.api_ver)
- urls.append(cur)
- url2base[cur] = url
-
- starttime = time.time()
- url = util.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=log.warn)
-
- if url:
- log.debug("Using metadata source: '%s'" % url2base[url])
- else:
- log.critical("giving up on md after %i seconds\n" %
- int(time.time() - starttime))
-
- self.metadata_address = url2base[url]
- return (bool(url))
-
- def device_name_to_device(self, name):
- # consult metadata service, that has
- # ephemeral0: sdb
- # and return 'sdb' for input 'ephemeral0'
- if 'block-device-mapping' not in self.metadata:
- return(None)
-
- found = None
- for entname, device in self.metadata['block-device-mapping'].items():
- if entname == name:
- found = device
- break
- # LP: #513842 mapping in Euca has 'ephemeral' not 'ephemeral0'
- if entname == "ephemeral" and name == "ephemeral0":
- found = device
- if found == None:
- log.debug("unable to convert %s to a device" % name)
- return None
-
- # LP: #611137
- # the metadata service may believe that devices are named 'sda'
- # when the kernel named them 'vda' or 'xvda'
- # we want to return the correct value for what will actually
- # exist in this instance
- mappings = {"sd": ("vd", "xvd")}
- ofound = found
- short = os.path.basename(found)
-
- if not found.startswith("/"):
- found = "/dev/%s" % found
-
- if os.path.exists(found):
- return(found)
-
- for nfrom, tlist in mappings.items():
- if not short.startswith(nfrom):
- continue
- for nto in tlist:
- cand = "/dev/%s%s" % (nto, short[len(nfrom):])
- if os.path.exists(cand):
- log.debug("remapped device name %s => %s" % (found, cand))
- return(cand)
-
- # on t1.micro, ephemeral0 will appear in block-device-mapping from
- # metadata, but it will not exist on disk (and never will)
- # at this pint, we've verified that the path did not exist
- # in the special case of 'ephemeral0' return None to avoid bogus
- # fstab entry (LP: #744019)
- if name == "ephemeral0":
- return None
- return ofound
-
- def is_vpc(self):
- # per comment in LP: #615545
- ph = "public-hostname"
- p4 = "public-ipv4"
- if ((ph not in self.metadata or self.metadata[ph] == "") and
- (p4 not in self.metadata or self.metadata[p4] == "")):
- return True
- return False
-
-
-datasources = [
- (DataSourceEc2, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
-]
-
-
-# return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return(DataSource.list_from_depends(depends, datasources))
diff --git a/cloudinit/DataSourceMAAS.py b/cloudinit/DataSourceMAAS.py
deleted file mode 100644
index e3e62057..00000000
--- a/cloudinit/DataSourceMAAS.py
+++ /dev/null
@@ -1,345 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import cloudinit.DataSource as DataSource
-
-from cloudinit import seeddir as base_seeddir
-from cloudinit import log
-import cloudinit.util as util
-import errno
-import oauth.oauth as oauth
-import os.path
-import urllib2
-import time
-
-
-MD_VERSION = "2012-03-01"
-
-
-class DataSourceMAAS(DataSource.DataSource):
- """
- DataSourceMAAS reads instance information from MAAS.
- Given a config metadata_url, and oauth tokens, it expects to find
- files under the root named:
- instance-id
- user-data
- hostname
- """
- seeddir = base_seeddir + '/maas'
- baseurl = None
-
- def __str__(self):
- return("DataSourceMAAS[%s]" % self.baseurl)
-
- def get_data(self):
- mcfg = self.ds_cfg
-
- try:
- (userdata, metadata) = read_maas_seed_dir(self.seeddir)
- self.userdata_raw = userdata
- self.metadata = metadata
- self.baseurl = self.seeddir
- return True
- except MAASSeedDirNone:
- pass
- except MAASSeedDirMalformed as exc:
- log.warn("%s was malformed: %s\n" % (self.seeddir, exc))
- raise
-
- try:
- # if there is no metadata_url, then we're not configured
- url = mcfg.get('metadata_url', None)
- if url == None:
- return False
-
- if not self.wait_for_metadata_service(url):
- return False
-
- self.baseurl = url
-
- (userdata, metadata) = read_maas_seed_url(self.baseurl,
- self.md_headers)
- self.userdata_raw = userdata
- self.metadata = metadata
- return True
- except Exception:
- util.logexc(log)
- return False
-
- def md_headers(self, url):
- mcfg = self.ds_cfg
-
- # if we are missing token_key, token_secret or consumer_key
- # then just do non-authed requests
- for required in ('token_key', 'token_secret', 'consumer_key'):
- if required not in mcfg:
- return({})
-
- consumer_secret = mcfg.get('consumer_secret', "")
-
- return(oauth_headers(url=url, consumer_key=mcfg['consumer_key'],
- token_key=mcfg['token_key'], token_secret=mcfg['token_secret'],
- consumer_secret=consumer_secret))
-
- def wait_for_metadata_service(self, url):
- mcfg = self.ds_cfg
-
- max_wait = 120
- try:
- max_wait = int(mcfg.get("max_wait", max_wait))
- except Exception:
- util.logexc(log)
- log.warn("Failed to get max wait. using %s" % max_wait)
-
- if max_wait == 0:
- return False
-
- timeout = 50
- try:
- timeout = int(mcfg.get("timeout", timeout))
- except Exception:
- util.logexc(log)
- log.warn("Failed to get timeout, using %s" % timeout)
-
- starttime = time.time()
- check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
- url = util.wait_for_url(urls=[check_url], max_wait=max_wait,
- timeout=timeout, status_cb=log.warn,
- headers_cb=self.md_headers)
-
- if url:
- log.debug("Using metadata source: '%s'" % url)
- else:
- log.critical("giving up on md after %i seconds\n" %
- int(time.time() - starttime))
-
- return (bool(url))
-
-
-def read_maas_seed_dir(seed_d):
- """
- Return user-data and metadata for a maas seed dir in seed_d.
- Expected format of seed_d are the following files:
- * instance-id
- * local-hostname
- * user-data
- """
- files = ('local-hostname', 'instance-id', 'user-data', 'public-keys')
- md = {}
-
- if not os.path.isdir(seed_d):
- raise MAASSeedDirNone("%s: not a directory")
-
- for fname in files:
- try:
- with open(os.path.join(seed_d, fname)) as fp:
- md[fname] = fp.read()
- fp.close()
- except IOError as e:
- if e.errno != errno.ENOENT:
- raise
-
- return(check_seed_contents(md, seed_d))
-
-
-def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
- version=MD_VERSION):
- """
- Read the maas datasource at seed_url.
- header_cb is a method that should return a headers dictionary that will
- be given to urllib2.Request()
-
- Expected format of seed_url is are the following files:
- * <seed_url>/<version>/meta-data/instance-id
- * <seed_url>/<version>/meta-data/local-hostname
- * <seed_url>/<version>/user-data
- """
- files = ('meta-data/local-hostname',
- 'meta-data/instance-id',
- 'meta-data/public-keys',
- 'user-data')
-
- base_url = "%s/%s" % (seed_url, version)
- md = {}
- for fname in files:
- url = "%s/%s" % (base_url, fname)
- if header_cb:
- headers = header_cb(url)
- else:
- headers = {}
-
- try:
- req = urllib2.Request(url, data=None, headers=headers)
- resp = urllib2.urlopen(req, timeout=timeout)
- md[os.path.basename(fname)] = resp.read()
- except urllib2.HTTPError as e:
- if e.code != 404:
- raise
-
- return(check_seed_contents(md, seed_url))
-
-
-def check_seed_contents(content, seed):
- """Validate if content is Is the content a dict that is valid as a
- return for a datasource.
- Either return a (userdata, metadata) tuple or
- Raise MAASSeedDirMalformed or MAASSeedDirNone
- """
- md_required = ('instance-id', 'local-hostname')
- found = content.keys()
-
- if len(content) == 0:
- raise MAASSeedDirNone("%s: no data files found" % seed)
-
- missing = [k for k in md_required if k not in found]
- if len(missing):
- raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
-
- userdata = content.get('user-data', "")
- md = {}
- for (key, val) in content.iteritems():
- if key == 'user-data':
- continue
- md[key] = val
-
- return(userdata, md)
-
-
-def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret):
- consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
- token = oauth.OAuthToken(token_key, token_secret)
- params = {
- 'oauth_version': "1.0",
- 'oauth_nonce': oauth.generate_nonce(),
- 'oauth_timestamp': int(time.time()),
- 'oauth_token': token.key,
- 'oauth_consumer_key': consumer.key,
- }
- req = oauth.OAuthRequest(http_url=url, parameters=params)
- req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(),
- consumer, token)
- return(req.to_header())
-
-
-class MAASSeedDirNone(Exception):
- pass
-
-
-class MAASSeedDirMalformed(Exception):
- pass
-
-
-datasources = [
- (DataSourceMAAS, (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
-]
-
-
-# return a list of data sources that match this set of dependencies
-def get_datasource_list(depends):
- return(DataSource.list_from_depends(depends, datasources))
-
-
-if __name__ == "__main__":
- def main():
- """
- Call with single argument of directory or http or https url.
- If url is given additional arguments are allowed, which will be
- interpreted as consumer_key, token_key, token_secret, consumer_secret
- """
- import argparse
- import pprint
-
- parser = argparse.ArgumentParser(description='Interact with MAAS DS')
- parser.add_argument("--config", metavar="file",
- help="specify DS config file", default=None)
- parser.add_argument("--ckey", metavar="key",
- help="the consumer key to auth with", default=None)
- parser.add_argument("--tkey", metavar="key",
- help="the token key to auth with", default=None)
- parser.add_argument("--csec", metavar="secret",
- help="the consumer secret (likely '')", default="")
- parser.add_argument("--tsec", metavar="secret",
- help="the token secret to auth with", default=None)
- parser.add_argument("--apiver", metavar="version",
- help="the apiver to use ("" can be used)", default=MD_VERSION)
-
- subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
- subcmds.add_parser('crawl', help="crawl the datasource")
- subcmds.add_parser('get', help="do a single GET of provided url")
- subcmds.add_parser('check-seed', help="read andn verify seed at url")
-
- parser.add_argument("url", help="the data source to query")
-
- args = parser.parse_args()
-
- creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
- 'token_secret': args.tsec, 'consumer_secret': args.csec}
-
- if args.config:
- import yaml
- with open(args.config) as fp:
- cfg = yaml.safe_load(fp)
- if 'datasource' in cfg:
- cfg = cfg['datasource']['MAAS']
- for key in creds.keys():
- if key in cfg and creds[key] == None:
- creds[key] = cfg[key]
-
- def geturl(url, headers_cb):
- req = urllib2.Request(url, data=None, headers=headers_cb(url))
- return(urllib2.urlopen(req).read())
-
- def printurl(url, headers_cb):
- print "== %s ==\n%s\n" % (url, geturl(url, headers_cb))
-
- def crawl(url, headers_cb=None):
- if url.endswith("/"):
- for line in geturl(url, headers_cb).splitlines():
- if line.endswith("/"):
- crawl("%s%s" % (url, line), headers_cb)
- else:
- printurl("%s%s" % (url, line), headers_cb)
- else:
- printurl(url, headers_cb)
-
- def my_headers(url):
- headers = {}
- if creds.get('consumer_key', None) != None:
- headers = oauth_headers(url, **creds)
- return headers
-
- if args.subcmd == "check-seed":
- if args.url.startswith("http"):
- (userdata, metadata) = read_maas_seed_url(args.url,
- header_cb=my_headers, version=args.apiver)
- else:
- (userdata, metadata) = read_maas_seed_url(args.url)
- print "=== userdata ==="
- print userdata
- print "=== metadata ==="
- pprint.pprint(metadata)
-
- elif args.subcmd == "get":
- printurl(args.url, my_headers)
-
- elif args.subcmd == "crawl":
- if not args.url.endswith("/"):
- args.url = "%s/" % args.url
- crawl(args.url, my_headers)
-
- main()
diff --git a/cloudinit/SshUtil.py b/cloudinit/SshUtil.py
deleted file mode 100644
index a081fbe8..00000000
--- a/cloudinit/SshUtil.py
+++ /dev/null
@@ -1,227 +0,0 @@
-#!/usr/bin/python
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2012 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import os
-import os.path
-import cloudinit.util as util
-
-
-class AuthKeyEntry():
- # lines are options, keytype, base64-encoded key, comment
- # man page says the following which I did not understand:
- # The options field is optional; its presence is determined by whether
- # the line starts with a number or not (the options field never starts
- # with a number)
- options = None
- keytype = None
- base64 = None
- comment = None
- is_comment = False
- line_in = ""
-
- def __init__(self, line, def_opt=None):
- line = line.rstrip("\n\r")
- self.line_in = line
- if line.startswith("#") or line.strip() == "":
- self.is_comment = True
- else:
- ent = line.strip()
- toks = ent.split(None, 3)
- if len(toks) == 1:
- self.base64 = toks[0]
- elif len(toks) == 2:
- (self.base64, self.comment) = toks
- elif len(toks) == 3:
- (self.keytype, self.base64, self.comment) = toks
- elif len(toks) == 4:
- i = 0
- ent = line.strip()
- quoted = False
- # taken from auth_rsa_key_allowed in auth-rsa.c
- try:
- while (i < len(ent) and
- ((quoted) or (ent[i] not in (" ", "\t")))):
- curc = ent[i]
- nextc = ent[i + 1]
- if curc == "\\" and nextc == '"':
- i = i + 1
- elif curc == '"':
- quoted = not quoted
- i = i + 1
- except IndexError:
- self.is_comment = True
- return
-
- try:
- self.options = ent[0:i]
- (self.keytype, self.base64, self.comment) = \
- ent[i + 1:].split(None, 3)
- except ValueError:
- # we did not understand this line
- self.is_comment = True
-
- if self.options == None and def_opt:
- self.options = def_opt
-
- return
-
- def debug(self):
- print("line_in=%s\ncomment: %s\noptions=%s\nkeytype=%s\nbase64=%s\n"
- "comment=%s\n" % (self.line_in, self.is_comment, self.options,
- self.keytype, self.base64, self.comment)),
-
- def __repr__(self):
- if self.is_comment:
- return(self.line_in)
- else:
- toks = []
- for e in (self.options, self.keytype, self.base64, self.comment):
- if e:
- toks.append(e)
-
- return(' '.join(toks))
-
-
-def update_authorized_keys(fname, keys):
- # keys is a list of AuthKeyEntries
- # key_prefix is the prefix (options) to prepend
- try:
- fp = open(fname, "r")
- lines = fp.readlines() # lines have carriage return
- fp.close()
- except IOError:
- lines = []
-
- ka_stats = {} # keys_added status
- for k in keys:
- ka_stats[k] = False
-
- to_add = []
- for key in keys:
- to_add.append(key)
-
- for i in range(0, len(lines)):
- ent = AuthKeyEntry(lines[i])
- for k in keys:
- if k.base64 == ent.base64 and not k.is_comment:
- ent = k
- try:
- to_add.remove(k)
- except ValueError:
- pass
- lines[i] = str(ent)
-
- # now append any entries we did not match above
- for key in to_add:
- lines.append(str(key))
-
- if len(lines) == 0:
- return("")
- else:
- return('\n'.join(lines) + "\n")
-
-
-def setup_user_keys(keys, user, key_prefix, log=None):
- import pwd
- saved_umask = os.umask(077)
-
- pwent = pwd.getpwnam(user)
-
- ssh_dir = '%s/.ssh' % pwent.pw_dir
- if not os.path.exists(ssh_dir):
- os.mkdir(ssh_dir)
- os.chown(ssh_dir, pwent.pw_uid, pwent.pw_gid)
-
- try:
- ssh_cfg = parse_ssh_config()
- akeys = ssh_cfg.get("AuthorizedKeysFile", "%h/.ssh/authorized_keys")
- akeys = akeys.replace("%h", pwent.pw_dir)
- akeys = akeys.replace("%u", user)
- if not akeys.startswith('/'):
- akeys = os.path.join(pwent.pw_dir, akeys)
- authorized_keys = akeys
- except Exception:
- authorized_keys = '%s/.ssh/authorized_keys' % pwent.pw_dir
- if log:
- util.logexc(log)
-
- key_entries = []
- for k in keys:
- ke = AuthKeyEntry(k, def_opt=key_prefix)
- key_entries.append(ke)
-
- content = update_authorized_keys(authorized_keys, key_entries)
- util.write_file(authorized_keys, content, 0600)
-
- os.chown(authorized_keys, pwent.pw_uid, pwent.pw_gid)
- util.restorecon_if_possible(ssh_dir, recursive=True)
-
- os.umask(saved_umask)
-
-
-def parse_ssh_config(fname="/etc/ssh/sshd_config"):
- ret = {}
- fp = open(fname)
- for l in fp.readlines():
- l = l.strip()
- if not l or l.startswith("#"):
- continue
- key, val = l.split(None, 1)
- ret[key] = val
- fp.close()
- return(ret)
-
-if __name__ == "__main__":
- def main():
- import sys
- # usage: orig_file, new_keys, [key_prefix]
- # prints out merged, where 'new_keys' will trump old
- ## example
- ## ### begin auth_keys ###
- # ssh-rsa AAAAB3NzaC1xxxxxxxxxV3csgm8cJn7UveKHkYjJp8= smoser-work
- # ssh-rsa AAAAB3NzaC1xxxxxxxxxCmXp5Kt5/82cD/VN3NtHw== smoser@brickies
- # ### end authorized_keys ###
- #
- # ### begin new_keys ###
- # ssh-rsa nonmatch smoser@newhost
- # ssh-rsa AAAAB3NzaC1xxxxxxxxxV3csgm8cJn7UveKHkYjJp8= new_comment
- # ### end new_keys ###
- #
- # Then run as:
- # program auth_keys new_keys \
- # 'no-port-forwarding,command=\"echo hi world;\"'
- def_prefix = None
- orig_key_file = sys.argv[1]
- new_key_file = sys.argv[2]
- if len(sys.argv) > 3:
- def_prefix = sys.argv[3]
- fp = open(new_key_file)
-
- newkeys = []
- for line in fp.readlines():
- newkeys.append(AuthKeyEntry(line, def_prefix))
-
- fp.close()
- print update_authorized_keys(orig_key_file, newkeys)
-
- main()
-
-# vi: ts=4 expandtab
diff --git a/cloudinit/UserDataHandler.py b/cloudinit/UserDataHandler.py
deleted file mode 100644
index bf694a8e..00000000
--- a/cloudinit/UserDataHandler.py
+++ /dev/null
@@ -1,262 +0,0 @@
-# vi: ts=4 expandtab
-#
-# Copyright (C) 2009-2010 Canonical Ltd.
-# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
-#
-# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 3, as
-# published by the Free Software Foundation.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-import email
-
-from email.mime.multipart import MIMEMultipart
-from email.mime.text import MIMEText
-from email.mime.base import MIMEBase
-import yaml
-import cloudinit
-import cloudinit.util as util
-import hashlib
-import urllib
-
-
-starts_with_mappings = {
- '#include': 'text/x-include-url',
- '#include-once': 'text/x-include-once-url',
- '#!': 'text/x-shellscript',
- '#cloud-config': 'text/cloud-config',
- '#upstart-job': 'text/upstart-job',
- '#part-handler': 'text/part-handler',
- '#cloud-boothook': 'text/cloud-boothook',
- '#cloud-config-archive': 'text/cloud-config-archive',
-}
-
-
-# if 'string' is compressed return decompressed otherwise return it
-def decomp_str(string):
- import StringIO
- import gzip
- try:
- uncomp = gzip.GzipFile(None, "rb", 1, StringIO.StringIO(string)).read()
- return(uncomp)
- except:
- return(string)
-
-
-def do_include(content, appendmsg):
- import os
- # is just a list of urls, one per line
- # also support '#include <url here>'
- includeonce = False
- for line in content.splitlines():
- if line == "#include":
- continue
- if line == "#include-once":
- includeonce = True
- continue
- if line.startswith("#include-once"):
- line = line[len("#include-once"):].lstrip()
- includeonce = True
- elif line.startswith("#include"):
- line = line[len("#include"):].lstrip()
- if line.startswith("#"):
- continue
- if line.strip() == "":
- continue
-
- # urls cannot not have leading or trailing white space
- msum = hashlib.md5() # pylint: disable=E1101
- msum.update(line.strip())
- includeonce_filename = "%s/urlcache/%s" % (
- cloudinit.get_ipath_cur("data"), msum.hexdigest())
- try:
- if includeonce and os.path.isfile(includeonce_filename):
- with open(includeonce_filename, "r") as fp:
- content = fp.read()
- else:
- content = urllib.urlopen(line).read()
- if includeonce:
- util.write_file(includeonce_filename, content, mode=0600)
- except Exception:
- raise
-
- process_includes(message_from_string(decomp_str(content)), appendmsg)
-
-
-def explode_cc_archive(archive, appendmsg):
- for ent in yaml.safe_load(archive):
- # ent can be one of:
- # dict { 'filename' : 'value', 'content' : 'value', 'type' : 'value' }
- # filename and type not be present
- # or
- # scalar(payload)
-
- def_type = "text/cloud-config"
- if isinstance(ent, str):
- ent = {'content': ent}
-
- content = ent.get('content', '')
- mtype = ent.get('type', None)
- if mtype == None:
- mtype = type_from_startswith(content, def_type)
-
- maintype, subtype = mtype.split('/', 1)
- if maintype == "text":
- msg = MIMEText(content, _subtype=subtype)
- else:
- msg = MIMEBase(maintype, subtype)
- msg.set_payload(content)
-
- if 'filename' in ent:
- msg.add_header('Content-Disposition', 'attachment',
- filename=ent['filename'])
-
- for header in ent.keys():
- if header in ('content', 'filename', 'type'):
- continue
- msg.add_header(header, ent['header'])
-
- _attach_part(appendmsg, msg)
-
-
-def multi_part_count(outermsg, newcount=None):
- """
- Return the number of attachments to this MIMEMultipart by looking
- at its 'Number-Attachments' header.
- """
- nfield = 'Number-Attachments'
- if nfield not in outermsg:
- outermsg[nfield] = "0"
-
- if newcount != None:
- outermsg.replace_header(nfield, str(newcount))
-
- return(int(outermsg.get('Number-Attachments', 0)))
-
-
-def _attach_part(outermsg, part):
- """
- Attach an part to an outer message. outermsg must be a MIMEMultipart.
- Modifies a header in outermsg to keep track of number of attachments.
- """
- cur = multi_part_count(outermsg)
- if not part.get_filename(None):
- part.add_header('Content-Disposition', 'attachment',
- filename='part-%03d' % (cur + 1))
- outermsg.attach(part)
- multi_part_count(outermsg, cur + 1)
-
-
-def type_from_startswith(payload, default=None):
- # slist is sorted longest first
- slist = sorted(starts_with_mappings.keys(), key=lambda e: 0 - len(e))
- for sstr in slist:
- if payload.startswith(sstr):
- return(starts_with_mappings[sstr])
- return default
-
-
-def process_includes(msg, appendmsg=None):
- if appendmsg == None:
- appendmsg = MIMEMultipart()
-
- for part in msg.walk():
- # multipart/* are just containers
- if part.get_content_maintype() == 'multipart':
- continue
-
- ctype = None
- ctype_orig = part.get_content_type()
-
- payload = part.get_payload(decode=True)
-
- if ctype_orig in ("text/plain", "text/x-not-multipart"):
- ctype = type_from_startswith(payload)
-
- if ctype is None:
- ctype = ctype_orig
-
- if ctype in ('text/x-include-url', 'text/x-include-once-url'):
- do_include(payload, appendmsg)
- continue
-
- if ctype == "text/cloud-config-archive":
- explode_cc_archive(payload, appendmsg)
- continue
-
- if 'Content-Type' in msg:
- msg.replace_header('Content-Type', ctype)
- else:
- msg['Content-Type'] = ctype
-
- _attach_part(appendmsg, part)
-
-
-def message_from_string(data, headers=None):
- if headers is None:
- headers = {}
- if "mime-version:" in data[0:4096].lower():
- msg = email.message_from_string(data)
- for (key, val) in headers.items():
- if key in msg:
- msg.replace_header(key, val)
- else:
- msg[key] = val
- else:
- mtype = headers.get("Content-Type", "text/x-not-multipart")
- maintype, subtype = mtype.split("/", 1)
- msg = MIMEBase(maintype, subtype, *headers)
- msg.set_payload(data)
-
- return(msg)
-
-
-# this is heavily wasteful, reads through userdata string input
-def preprocess_userdata(data):
- newmsg = MIMEMultipart()
- process_includes(message_from_string(decomp_str(data)), newmsg)
- return(newmsg.as_string())
-
-
-# callback is a function that will be called with (data, content_type,
-# filename, payload)
-def walk_userdata(istr, callback, data=None):
- partnum = 0
- for part in message_from_string(istr).walk():
- # multipart/* are just containers
- if part.get_content_maintype() == 'multipart':
- continue
-
- ctype = part.get_content_type()
- if ctype is None:
- ctype = 'application/octet-stream'
-
- filename = part.get_filename()
- if not filename:
- filename = 'part-%03d' % partnum
-
- callback(data, ctype, filename, part.get_payload(decode=True))
-
- partnum = partnum + 1
-
-
-if __name__ == "__main__":
- def main():
- import sys
- data = decomp_str(file(sys.argv[1]).read())
- newmsg = MIMEMultipart()
- process_includes(message_from_string(data), newmsg)
- print newmsg
- print "#found %s parts" % multi_part_count(newmsg)
-
- main()
diff --git a/cloudinit/__init__.py b/cloudinit/__init__.py
index dafb128a..da124641 100644
--- a/cloudinit/__init__.py
+++ b/cloudinit/__init__.py
@@ -1,11 +1,12 @@
# vi: ts=4 expandtab
#
-# Common code for the EC2 initialisation scripts in Ubuntu
-# Copyright (C) 2008-2009 Canonical Ltd
+# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
#
-# Author: Soren Hansen <soren@canonical.com>
+# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -18,650 +19,3 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-
-varlibdir = '/var/lib/cloud'
-cur_instance_link = varlibdir + "/instance"
-boot_finished = cur_instance_link + "/boot-finished"
-system_config = '/etc/cloud/cloud.cfg'
-seeddir = varlibdir + "/seed"
-cfg_env_name = "CLOUD_CFG"
-
-cfg_builtin = """
-log_cfgs: []
-datasource_list: ["NoCloud", "ConfigDrive", "OVF", "MAAS", "Ec2", "CloudStack"]
-def_log_file: /var/log/cloud-init.log
-syslog_fix_perms: syslog:adm
-"""
-logger_name = "cloudinit"
-
-pathmap = {
- "handlers": "/handlers",
- "scripts": "/scripts",
- "sem": "/sem",
- "boothooks": "/boothooks",
- "userdata_raw": "/user-data.txt",
- "userdata": "/user-data.txt.i",
- "obj_pkl": "/obj.pkl",
- "cloud_config": "/cloud-config.txt",
- "data": "/data",
- None: "",
-}
-
-per_instance = "once-per-instance"
-per_always = "always"
-per_once = "once"
-
-parsed_cfgs = {}
-
-import os
-
-import cPickle
-import sys
-import os.path
-import errno
-import subprocess
-import yaml
-import logging
-import logging.config
-import StringIO
-import glob
-import traceback
-
-import cloudinit.util as util
-
-
-class NullHandler(logging.Handler):
- def emit(self, record):
- pass
-
-
-log = logging.getLogger(logger_name)
-log.addHandler(NullHandler())
-
-
-def logging_set_from_cfg_file(cfg_file=system_config):
- logging_set_from_cfg(util.get_base_cfg(cfg_file, cfg_builtin, parsed_cfgs))
-
-
-def logging_set_from_cfg(cfg):
- log_cfgs = []
- logcfg = util.get_cfg_option_str(cfg, "log_cfg", False)
- if logcfg:
- # if there is a 'logcfg' entry in the config, respect
- # it, it is the old keyname
- log_cfgs = [logcfg]
- elif "log_cfgs" in cfg:
- for cfg in cfg['log_cfgs']:
- if isinstance(cfg, list):
- log_cfgs.append('\n'.join(cfg))
- else:
- log_cfgs.append()
-
- if not len(log_cfgs):
- sys.stderr.write("Warning, no logging configured\n")
- return
-
- for logcfg in log_cfgs:
- try:
- logging.config.fileConfig(StringIO.StringIO(logcfg))
- return
- except:
- pass
-
- raise Exception("no valid logging found\n")
-
-
-import cloudinit.DataSource as DataSource
-import cloudinit.UserDataHandler as UserDataHandler
-
-
-class CloudInit:
- cfg = None
- part_handlers = {}
- old_conffile = '/etc/ec2-init/ec2-config.cfg'
- ds_deps = [DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK]
- datasource = None
- cloud_config_str = ''
- datasource_name = ''
-
- builtin_handlers = []
-
- def __init__(self, ds_deps=None, sysconfig=system_config):
- self.builtin_handlers = [
- ['text/x-shellscript', self.handle_user_script, per_always],
- ['text/cloud-config', self.handle_cloud_config, per_always],
- ['text/upstart-job', self.handle_upstart_job, per_instance],
- ['text/cloud-boothook', self.handle_cloud_boothook, per_always],
- ]
-
- if ds_deps != None:
- self.ds_deps = ds_deps
-
- self.sysconfig = sysconfig
-
- self.cfg = self.read_cfg()
-
- def read_cfg(self):
- if self.cfg:
- return(self.cfg)
-
- try:
- conf = util.get_base_cfg(self.sysconfig, cfg_builtin, parsed_cfgs)
- except Exception:
- conf = get_builtin_cfg()
-
- # support reading the old ConfigObj format file and merging
- # it into the yaml dictionary
- try:
- from configobj import ConfigObj
- oldcfg = ConfigObj(self.old_conffile)
- if oldcfg is None:
- oldcfg = {}
- conf = util.mergedict(conf, oldcfg)
- except:
- pass
-
- return(conf)
-
- def restore_from_cache(self):
- try:
- # we try to restore from a current link and static path
- # by using the instance link, if purge_cache was called
- # the file wont exist
- cache = get_ipath_cur('obj_pkl')
- f = open(cache, "rb")
- data = cPickle.load(f)
- f.close()
- self.datasource = data
- return True
- except:
- return False
-
- def write_to_cache(self):
- cache = self.get_ipath("obj_pkl")
- try:
- os.makedirs(os.path.dirname(cache))
- except OSError as e:
- if e.errno != errno.EEXIST:
- return False
-
- try:
- f = open(cache, "wb")
- cPickle.dump(self.datasource, f)
- f.close()
- os.chmod(cache, 0400)
- except:
- raise
-
- def get_data_source(self):
- if self.datasource is not None:
- return True
-
- if self.restore_from_cache():
- log.debug("restored from cache type %s" % self.datasource)
- return True
-
- cfglist = self.cfg['datasource_list']
- dslist = list_sources(cfglist, self.ds_deps)
- dsnames = [f.__name__ for f in dslist]
-
- log.debug("searching for data source in %s" % dsnames)
- for cls in dslist:
- ds = cls.__name__
- try:
- s = cls(sys_cfg=self.cfg)
- if s.get_data():
- self.datasource = s
- self.datasource_name = ds
- log.debug("found data source %s" % ds)
- return True
- except Exception as e:
- log.warn("get_data of %s raised %s" % (ds, e))
- util.logexc(log)
- msg = "Did not find data source. searched classes: %s" % dsnames
- log.debug(msg)
- raise DataSourceNotFoundException(msg)
-
- def set_cur_instance(self):
- try:
- os.unlink(cur_instance_link)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
-
- iid = self.get_instance_id()
- os.symlink("./instances/%s" % iid, cur_instance_link)
- idir = self.get_ipath()
- dlist = []
- for d in ["handlers", "scripts", "sem"]:
- dlist.append("%s/%s" % (idir, d))
-
- util.ensure_dirs(dlist)
-
- ds = "%s: %s\n" % (self.datasource.__class__, str(self.datasource))
- dp = self.get_cpath('data')
- util.write_file("%s/%s" % (idir, 'datasource'), ds)
- util.write_file("%s/%s" % (dp, 'previous-datasource'), ds)
- util.write_file("%s/%s" % (dp, 'previous-instance-id'), "%s\n" % iid)
-
- def get_userdata(self):
- return(self.datasource.get_userdata())
-
- def get_userdata_raw(self):
- return(self.datasource.get_userdata_raw())
-
- def get_instance_id(self):
- return(self.datasource.get_instance_id())
-
- def update_cache(self):
- self.write_to_cache()
- self.store_userdata()
-
- def store_userdata(self):
- util.write_file(self.get_ipath('userdata_raw'),
- self.datasource.get_userdata_raw(), 0600)
- util.write_file(self.get_ipath('userdata'),
- self.datasource.get_userdata(), 0600)
-
- def sem_getpath(self, name, freq):
- if freq == 'once-per-instance':
- return("%s/%s" % (self.get_ipath("sem"), name))
- return("%s/%s.%s" % (get_cpath("sem"), name, freq))
-
- def sem_has_run(self, name, freq):
- if freq == per_always:
- return False
- semfile = self.sem_getpath(name, freq)
- if os.path.exists(semfile):
- return True
- return False
-
- def sem_acquire(self, name, freq):
- from time import time
- semfile = self.sem_getpath(name, freq)
-
- try:
- os.makedirs(os.path.dirname(semfile))
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise e
-
- if os.path.exists(semfile) and freq != per_always:
- return False
-
- # race condition
- try:
- f = open(semfile, "w")
- f.write("%s\n" % str(time()))
- f.close()
- except:
- return(False)
- return(True)
-
- def sem_clear(self, name, freq):
- semfile = self.sem_getpath(name, freq)
- try:
- os.unlink(semfile)
- except OSError as e:
- if e.errno != errno.ENOENT:
- return False
-
- return True
-
- # acquire lock on 'name' for given 'freq'
- # if that does not exist, then call 'func' with given 'args'
- # if 'clear_on_fail' is True and func throws an exception
- # then remove the lock (so it would run again)
- def sem_and_run(self, semname, freq, func, args=None, clear_on_fail=False):
- if args is None:
- args = []
- if self.sem_has_run(semname, freq):
- log.debug("%s already ran %s", semname, freq)
- return False
- try:
- if not self.sem_acquire(semname, freq):
- raise Exception("Failed to acquire lock on %s" % semname)
-
- func(*args)
- except:
- if clear_on_fail:
- self.sem_clear(semname, freq)
- raise
-
- return True
-
- # get_ipath : get the instance path for a name in pathmap
- # (/var/lib/cloud/instances/<instance>/name)<name>)
- def get_ipath(self, name=None):
- return("%s/instances/%s%s"
- % (varlibdir, self.get_instance_id(), pathmap[name]))
-
- def consume_userdata(self, frequency=per_instance):
- self.get_userdata()
- data = self
-
- cdir = get_cpath("handlers")
- idir = self.get_ipath("handlers")
-
- # add the path to the plugins dir to the top of our list for import
- # instance dir should be read before cloud-dir
- sys.path.insert(0, cdir)
- sys.path.insert(0, idir)
-
- part_handlers = {}
- # add handlers in cdir
- for fname in glob.glob("%s/*.py" % cdir):
- if not os.path.isfile(fname):
- continue
- modname = os.path.basename(fname)[0:-3]
- try:
- mod = __import__(modname)
- handler_register(mod, part_handlers, data, frequency)
- log.debug("added handler for [%s] from %s" % (mod.list_types(),
- fname))
- except:
- log.warn("failed to initialize handler in %s" % fname)
- util.logexc(log)
-
- # add the internal handlers if their type hasn't already been claimed
- for (btype, bhand, bfreq) in self.builtin_handlers:
- if btype in part_handlers:
- continue
- handler_register(InternalPartHandler(bhand, [btype], bfreq),
- part_handlers, data, frequency)
-
- # walk the data
- pdata = {'handlers': part_handlers, 'handlerdir': idir,
- 'data': data, 'frequency': frequency}
- UserDataHandler.walk_userdata(self.get_userdata(),
- partwalker_callback, data=pdata)
-
- # give callbacks opportunity to finalize
- called = []
- for (_mtype, mod) in part_handlers.iteritems():
- if mod in called:
- continue
- handler_call_end(mod, data, frequency)
-
- def handle_user_script(self, _data, ctype, filename, payload, _frequency):
- if ctype == "__end__":
- return
- if ctype == "__begin__":
- # maybe delete existing things here
- return
-
- filename = filename.replace(os.sep, '_')
- scriptsdir = get_ipath_cur('scripts')
- util.write_file("%s/%s" %
- (scriptsdir, filename), util.dos2unix(payload), 0700)
-
- def handle_upstart_job(self, _data, ctype, filename, payload, frequency):
- # upstart jobs are only written on the first boot
- if frequency != per_instance:
- return
-
- if ctype == "__end__" or ctype == "__begin__":
- return
- if not filename.endswith(".conf"):
- filename = filename + ".conf"
-
- util.write_file("%s/%s" % ("/etc/init", filename),
- util.dos2unix(payload), 0644)
-
- def handle_cloud_config(self, _data, ctype, filename, payload, _frequency):
- if ctype == "__begin__":
- self.cloud_config_str = ""
- return
- if ctype == "__end__":
- cloud_config = self.get_ipath("cloud_config")
- util.write_file(cloud_config, self.cloud_config_str, 0600)
-
- ## this could merge the cloud config with the system config
- ## for now, not doing this as it seems somewhat circular
- ## as CloudConfig does that also, merging it with this cfg
- ##
- # ccfg = yaml.safe_load(self.cloud_config_str)
- # if ccfg is None: ccfg = {}
- # self.cfg = util.mergedict(ccfg, self.cfg)
-
- return
-
- self.cloud_config_str += "\n#%s\n%s" % (filename, payload)
-
- def handle_cloud_boothook(self, _data, ctype, filename, payload,
- _frequency):
- if ctype == "__end__":
- return
- if ctype == "__begin__":
- return
-
- filename = filename.replace(os.sep, '_')
- payload = util.dos2unix(payload)
- prefix = "#cloud-boothook"
- start = 0
- if payload.startswith(prefix):
- start = len(prefix) + 1
-
- boothooks_dir = self.get_ipath("boothooks")
- filepath = "%s/%s" % (boothooks_dir, filename)
- util.write_file(filepath, payload[start:], 0700)
- try:
- env = os.environ.copy()
- env['INSTANCE_ID'] = self.datasource.get_instance_id()
- subprocess.check_call([filepath], env=env)
- except subprocess.CalledProcessError as e:
- log.error("boothooks script %s returned %i" %
- (filepath, e.returncode))
- except Exception as e:
- log.error("boothooks unknown exception %s when running %s" %
- (e, filepath))
-
- def get_public_ssh_keys(self):
- return(self.datasource.get_public_ssh_keys())
-
- def get_locale(self):
- return(self.datasource.get_locale())
-
- def get_mirror(self):
- return(self.datasource.get_local_mirror())
-
- def get_hostname(self, fqdn=False):
- return(self.datasource.get_hostname(fqdn=fqdn))
-
- def device_name_to_device(self, name):
- return(self.datasource.device_name_to_device(name))
-
- # I really don't know if this should be here or not, but
- # I needed it in cc_update_hostname, where that code had a valid 'cloud'
- # reference, but did not have a cloudinit handle
- # (ie, no cloudinit.get_cpath())
- def get_cpath(self, name=None):
- return(get_cpath(name))
-
-
-def initfs():
- subds = ['scripts/per-instance', 'scripts/per-once', 'scripts/per-boot',
- 'seed', 'instances', 'handlers', 'sem', 'data']
- dlist = []
- for subd in subds:
- dlist.append("%s/%s" % (varlibdir, subd))
- util.ensure_dirs(dlist)
-
- cfg = util.get_base_cfg(system_config, cfg_builtin, parsed_cfgs)
- log_file = util.get_cfg_option_str(cfg, 'def_log_file', None)
- perms = util.get_cfg_option_str(cfg, 'syslog_fix_perms', None)
- if log_file:
- fp = open(log_file, "ab")
- fp.close()
- if log_file and perms:
- (u, g) = perms.split(':', 1)
- if u == "-1" or u == "None":
- u = None
- if g == "-1" or g == "None":
- g = None
- util.chownbyname(log_file, u, g)
-
-
-def purge_cache(rmcur=True):
- rmlist = [boot_finished]
- if rmcur:
- rmlist.append(cur_instance_link)
- for f in rmlist:
- try:
- os.unlink(f)
- except OSError as e:
- if e.errno == errno.ENOENT:
- continue
- return(False)
- except:
- return(False)
- return(True)
-
-
-# get_ipath_cur: get the current instance path for an item
-def get_ipath_cur(name=None):
- return("%s/%s%s" % (varlibdir, "instance", pathmap[name]))
-
-
-# get_cpath : get the "clouddir" (/var/lib/cloud/<name>)
-# for a name in dirmap
-def get_cpath(name=None):
- return("%s%s" % (varlibdir, pathmap[name]))
-
-
-def get_base_cfg(cfg_path=None):
- if cfg_path is None:
- cfg_path = system_config
- return(util.get_base_cfg(cfg_path, cfg_builtin, parsed_cfgs))
-
-
-def get_builtin_cfg():
- return(yaml.safe_load(cfg_builtin))
-
-
-class DataSourceNotFoundException(Exception):
- pass
-
-
-def list_sources(cfg_list, depends):
- return(DataSource.list_sources(cfg_list, depends, ["cloudinit", ""]))
-
-
-def handler_register(mod, part_handlers, data, frequency=per_instance):
- if not hasattr(mod, "handler_version"):
- setattr(mod, "handler_version", 1)
-
- for mtype in mod.list_types():
- part_handlers[mtype] = mod
-
- handler_call_begin(mod, data, frequency)
- return(mod)
-
-
-def handler_call_begin(mod, data, frequency):
- handler_handle_part(mod, data, "__begin__", None, None, frequency)
-
-
-def handler_call_end(mod, data, frequency):
- handler_handle_part(mod, data, "__end__", None, None, frequency)
-
-
-def handler_handle_part(mod, data, ctype, filename, payload, frequency):
- # only add the handler if the module should run
- modfreq = getattr(mod, "frequency", per_instance)
- if not (modfreq == per_always or
- (frequency == per_instance and modfreq == per_instance)):
- return
- try:
- if mod.handler_version == 1:
- mod.handle_part(data, ctype, filename, payload)
- else:
- mod.handle_part(data, ctype, filename, payload, frequency)
- except:
- util.logexc(log)
- traceback.print_exc(file=sys.stderr)
-
-
-def partwalker_handle_handler(pdata, _ctype, _filename, payload):
- curcount = pdata['handlercount']
- modname = 'part-handler-%03d' % curcount
- frequency = pdata['frequency']
-
- modfname = modname + ".py"
- util.write_file("%s/%s" % (pdata['handlerdir'], modfname), payload, 0600)
-
- try:
- mod = __import__(modname)
- handler_register(mod, pdata['handlers'], pdata['data'], frequency)
- pdata['handlercount'] = curcount + 1
- except:
- util.logexc(log)
- traceback.print_exc(file=sys.stderr)
-
-
-def partwalker_callback(pdata, ctype, filename, payload):
- # data here is the part_handlers array and then the data to pass through
- if ctype == "text/part-handler":
- if 'handlercount' not in pdata:
- pdata['handlercount'] = 0
- partwalker_handle_handler(pdata, ctype, filename, payload)
- return
- if ctype not in pdata['handlers'] and payload:
- if ctype == "text/x-not-multipart":
- # Extract the first line or 24 bytes for displaying in the log
- start = payload.split("\n", 1)[0][:24]
- if start < payload:
- details = "starting '%s...'" % start.encode("string-escape")
- else:
- details = repr(payload)
- log.warning("Unhandled non-multipart userdata %s", details)
- return
- handler_handle_part(pdata['handlers'][ctype], pdata['data'],
- ctype, filename, payload, pdata['frequency'])
-
-
-class InternalPartHandler:
- freq = per_instance
- mtypes = []
- handler_version = 1
- handler = None
-
- def __init__(self, handler, mtypes, frequency, version=2):
- self.handler = handler
- self.mtypes = mtypes
- self.frequency = frequency
- self.handler_version = version
-
- def __repr__(self):
- return("InternalPartHandler: [%s]" % self.mtypes)
-
- def list_types(self):
- return(self.mtypes)
-
- def handle_part(self, data, ctype, filename, payload, frequency):
- return(self.handler(data, ctype, filename, payload, frequency))
-
-
-def get_cmdline_url(names=('cloud-config-url', 'url'),
- starts="#cloud-config", cmdline=None):
-
- if cmdline == None:
- cmdline = util.get_cmdline()
-
- data = util.keyval_str_to_dict(cmdline)
- url = None
- key = None
- for key in names:
- if key in data:
- url = data[key]
- break
- if url == None:
- return (None, None, None)
-
- contents = util.readurl(url)
-
- if contents.startswith(starts):
- return (key, url, contents)
-
- return (key, url, None)
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
new file mode 100644
index 00000000..22d9167e
--- /dev/null
+++ b/cloudinit/cloud.py
@@ -0,0 +1,101 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import copy
+import os
+
+from cloudinit import log as logging
+
+LOG = logging.getLogger(__name__)
+
+# This class is the high level wrapper that provides
+# access to cloud-init objects without exposing the stage objects
+# to handler and or module manipulation. It allows for cloud
+# init to restrict what those types of user facing code may see
+# and or adjust (which helps avoid code messing with each other)
+#
+# It also provides util functions that avoid having to know
+# how to get a certain member from this submembers as well
+# as providing a backwards compatible object that can be maintained
+# while the stages/other objects can be worked on independently...
+
+
+class Cloud(object):
+ def __init__(self, datasource, paths, cfg, distro, runners):
+ self.datasource = datasource
+ self.paths = paths
+ self.distro = distro
+ self._cfg = cfg
+ self._runners = runners
+
+ # If a 'user' manipulates logging or logging services
+ # it is typically useful to cause the logging to be
+ # setup again.
+ def cycle_logging(self):
+ logging.resetLogging()
+ logging.setupLogging(self.cfg)
+
+ @property
+ def cfg(self):
+ # Ensure that not indirectly modified
+ return copy.deepcopy(self._cfg)
+
+ def run(self, name, functor, args, freq=None, clear_on_fail=False):
+ return self._runners.run(name, functor, args, freq, clear_on_fail)
+
+ def get_template_filename(self, name):
+ fn = self.paths.template_tpl % (name)
+ if not os.path.isfile(fn):
+ LOG.warn("No template found at %s for template named %s", fn, name)
+ return None
+ return fn
+
+ # The rest of these are just useful proxies
+ def get_userdata(self):
+ return self.datasource.get_userdata()
+
+ def get_instance_id(self):
+ return self.datasource.get_instance_id()
+
+ def get_public_ssh_keys(self):
+ return self.datasource.get_public_ssh_keys()
+
+ def get_locale(self):
+ return self.datasource.get_locale()
+
+ def get_local_mirror(self):
+ return self.datasource.get_local_mirror()
+
+ def get_hostname(self, fqdn=False):
+ return self.datasource.get_hostname(fqdn=fqdn)
+
+ def device_name_to_device(self, name):
+ return self.datasource.device_name_to_device(name)
+
+ def get_ipath_cur(self, name=None):
+ return self.paths.get_ipath_cur(name)
+
+ def get_cpath(self, name=None):
+ return self.paths.get_cpath(name)
+
+ def get_ipath(self, name=None):
+ return self.paths.get_ipath(name)
diff --git a/cloudinit/config/__init__.py b/cloudinit/config/__init__.py
new file mode 100644
index 00000000..69a8cc68
--- /dev/null
+++ b/cloudinit/config/__init__.py
@@ -0,0 +1,56 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2008-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Chuck Short <chuck.short@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+from cloudinit.settings import (PER_INSTANCE, FREQUENCIES)
+
+from cloudinit import log as logging
+
+LOG = logging.getLogger(__name__)
+
+# This prefix is used to reduce the
+# chance that, when importing, we will
+# find something else with the same
+# name in the lookup path...
+MOD_PREFIX = "cc_"
+
+
+def form_module_name(name):
+ canon_name = name.replace("-", "_")
+ if canon_name.lower().endswith(".py"):
+ canon_name = canon_name[0:(len(canon_name) - 3)]
+ canon_name = canon_name.strip()
+ if not canon_name:
+ return None
+ if not canon_name.startswith(MOD_PREFIX):
+ canon_name = '%s%s' % (MOD_PREFIX, canon_name)
+ return canon_name
+
+
+def fixup_module(mod, def_freq=PER_INSTANCE):
+ if not hasattr(mod, 'frequency'):
+ setattr(mod, 'frequency', def_freq)
+ else:
+ freq = mod.frequency
+ if freq and freq not in FREQUENCIES:
+ LOG.warn("Module %s has an unknown frequency %s", mod, freq)
+ if not hasattr(mod, 'distros'):
+ setattr(mod, 'distros', None)
+ return mod
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
new file mode 100644
index 00000000..3426099e
--- /dev/null
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -0,0 +1,59 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+#
+# Author: Ben Howard <ben.howard@canonical.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+distros = ['ubuntu', 'debian']
+
+DEFAULT_FILE = "/etc/apt/apt.conf.d/90cloud-init-pipelining"
+
+APT_PIPE_TPL = ("//Written by cloud-init per 'apt_pipelining'\n"
+ 'Acquire::http::Pipeline-Depth "%s";\n')
+
+# Acquire::http::Pipeline-Depth can be a value
+# from 0 to 5 indicating how many outstanding requests APT should send.
+# A value of zero MUST be specified if the remote host does not properly linger
+# on TCP connections - otherwise data corruption will occur.
+
+
+def handle(_name, cfg, cloud, log, _args):
+
+ apt_pipe_value = util.get_cfg_option_str(cfg, "apt_pipelining", False)
+ apt_pipe_value_s = str(apt_pipe_value).lower().strip()
+
+ if apt_pipe_value_s == "false":
+ write_apt_snippet(cloud, "0", log, DEFAULT_FILE)
+ elif apt_pipe_value_s in ("none", "unchanged", "os"):
+ return
+ elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]:
+ write_apt_snippet(cloud, apt_pipe_value_s, log, DEFAULT_FILE)
+ else:
+ log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
+
+
+def write_apt_snippet(cloud, setting, log, f_name):
+ """ Writes f_name with apt pipeline depth 'setting' """
+
+ file_contents = APT_PIPE_TPL % (setting)
+
+ util.write_file(cloud.paths.join(False, f_name), file_contents)
+
+ log.debug("Wrote %s with apt pipeline depth setting %s", f_name, setting)
diff --git a/cloudinit/config/cc_apt_update_upgrade.py b/cloudinit/config/cc_apt_update_upgrade.py
new file mode 100644
index 00000000..5c5e510c
--- /dev/null
+++ b/cloudinit/config/cc_apt_update_upgrade.py
@@ -0,0 +1,272 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import glob
+import os
+
+from cloudinit import templater
+from cloudinit import util
+
+distros = ['ubuntu', 'debian']
+
+PROXY_TPL = "Acquire::HTTP::Proxy \"%s\";\n"
+PROXY_FN = "/etc/apt/apt.conf.d/95cloud-init-proxy"
+
+# A temporary shell program to get a given gpg key
+# from a given keyserver
+EXPORT_GPG_KEYID = """
+ k=${1} ks=${2};
+ exec 2>/dev/null
+ [ -n "$k" ] || exit 1;
+ armour=$(gpg --list-keys --armour "${k}")
+ if [ -z "${armour}" ]; then
+ gpg --keyserver ${ks} --recv $k >/dev/null &&
+ armour=$(gpg --export --armour "${k}") &&
+ gpg --batch --yes --delete-keys "${k}"
+ fi
+ [ -n "${armour}" ] && echo "${armour}"
+"""
+
+
+def handle(name, cfg, cloud, log, _args):
+ update = util.get_cfg_option_bool(cfg, 'apt_update', False)
+ upgrade = util.get_cfg_option_bool(cfg, 'apt_upgrade', False)
+
+ release = get_release()
+ mirror = find_apt_mirror(cloud, cfg)
+ if not mirror:
+ log.debug(("Skipping module named %s,"
+ " no package 'mirror' located"), name)
+ return
+
+ log.debug("Selected mirror at: %s" % mirror)
+
+ if not util.get_cfg_option_bool(cfg,
+ 'apt_preserve_sources_list', False):
+ generate_sources_list(release, mirror, cloud, log)
+ old_mir = util.get_cfg_option_str(cfg, 'apt_old_mirror',
+ "archive.ubuntu.com/ubuntu")
+ rename_apt_lists(old_mir, mirror)
+
+ # Set up any apt proxy
+ proxy = cfg.get("apt_proxy", None)
+ proxy_filename = PROXY_FN
+ if proxy:
+ try:
+ # See man 'apt.conf'
+ contents = PROXY_TPL % (proxy)
+ util.write_file(cloud.paths.join(False, proxy_filename),
+ contents)
+ except Exception as e:
+ util.logexc(log, "Failed to write proxy to %s", proxy_filename)
+ elif os.path.isfile(proxy_filename):
+ util.del_file(proxy_filename)
+
+ # Process 'apt_sources'
+ if 'apt_sources' in cfg:
+ errors = add_sources(cloud, cfg['apt_sources'],
+ {'MIRROR': mirror, 'RELEASE': release})
+ for e in errors:
+ log.warn("Source Error: %s", ':'.join(e))
+
+ dconf_sel = util.get_cfg_option_str(cfg, 'debconf_selections', False)
+ if dconf_sel:
+ log.debug("setting debconf selections per cloud config")
+ try:
+ util.subp(('debconf-set-selections', '-'), dconf_sel)
+ except:
+ util.logexc(log, "Failed to run debconf-set-selections")
+
+ pkglist = util.get_cfg_option_list(cfg, 'packages', [])
+
+ errors = []
+ if update or len(pkglist) or upgrade:
+ try:
+ cloud.distro.update_package_sources()
+ except Exception as e:
+ util.logexc(log, "Package update failed")
+ errors.append(e)
+
+ if upgrade:
+ try:
+ cloud.distro.package_command("upgrade")
+ except Exception as e:
+ util.logexc(log, "Package upgrade failed")
+ errors.append(e)
+
+ if len(pkglist):
+ try:
+ cloud.distro.install_packages(pkglist)
+ except Exception as e:
+ util.logexc(log, "Failed to install packages: %s ", pkglist)
+ errors.append(e)
+
+ if len(errors):
+ log.warn("%s failed with exceptions, re-raising the last one",
+ len(errors))
+ raise errors[-1]
+
+
+# get gpg keyid from keyserver
+def getkeybyid(keyid, keyserver):
+ with util.ExtendedTemporaryFile(suffix='.sh') as fh:
+ fh.write(EXPORT_GPG_KEYID)
+ fh.flush()
+ cmd = ['/bin/sh', fh.name, keyid, keyserver]
+ (stdout, _stderr) = util.subp(cmd)
+ return stdout.strip()
+
+
+def mirror2lists_fileprefix(mirror):
+ string = mirror
+ # take off http:// or ftp://
+ if string.endswith("/"):
+ string = string[0:-1]
+ pos = string.find("://")
+ if pos >= 0:
+ string = string[pos + 3:]
+ string = string.replace("/", "_")
+ return string
+
+
+def rename_apt_lists(omirror, new_mirror, lists_d="/var/lib/apt/lists"):
+ oprefix = os.path.join(lists_d, mirror2lists_fileprefix(omirror))
+ nprefix = os.path.join(lists_d, mirror2lists_fileprefix(new_mirror))
+ if oprefix == nprefix:
+ return
+ olen = len(oprefix)
+ for filename in glob.glob("%s_*" % oprefix):
+ # TODO use the cloud.paths.join...
+ util.rename(filename, "%s%s" % (nprefix, filename[olen:]))
+
+
+def get_release():
+ (stdout, _stderr) = util.subp(['lsb_release', '-cs'])
+ return stdout.strip()
+
+
+def generate_sources_list(codename, mirror, cloud, log):
+ template_fn = cloud.get_template_filename('sources.list')
+ if template_fn:
+ params = {'mirror': mirror, 'codename': codename}
+ out_fn = cloud.paths.join(False, '/etc/apt/sources.list')
+ templater.render_to_file(template_fn, out_fn, params)
+ else:
+ log.warn("No template found, not rendering /etc/apt/sources.list")
+
+
+def add_sources(cloud, srclist, template_params=None):
+ """
+ add entries in /etc/apt/sources.list.d for each abbreviated
+ sources.list entry in 'srclist'. When rendering template, also
+ include the values in dictionary searchList
+ """
+ if template_params is None:
+ template_params = {}
+
+ errorlist = []
+ for ent in srclist:
+ if 'source' not in ent:
+ errorlist.append(["", "missing source"])
+ continue
+
+ source = ent['source']
+ if source.startswith("ppa:"):
+ try:
+ util.subp(["add-apt-repository", source])
+ except:
+ errorlist.append([source, "add-apt-repository failed"])
+ continue
+
+ source = templater.render_string(source, template_params)
+
+ if 'filename' not in ent:
+ ent['filename'] = 'cloud_config_sources.list'
+
+ if not ent['filename'].startswith("/"):
+ ent['filename'] = os.path.join("/etc/apt/sources.list.d/",
+ ent['filename'])
+
+ if ('keyid' in ent and 'key' not in ent):
+ ks = "keyserver.ubuntu.com"
+ if 'keyserver' in ent:
+ ks = ent['keyserver']
+ try:
+ ent['key'] = getkeybyid(ent['keyid'], ks)
+ except:
+ errorlist.append([source, "failed to get key from %s" % ks])
+ continue
+
+ if 'key' in ent:
+ try:
+ util.subp(('apt-key', 'add', '-'), ent['key'])
+ except:
+ errorlist.append([source, "failed add key"])
+
+ try:
+ contents = "%s\n" % (source)
+ util.write_file(cloud.paths.join(False, ent['filename']),
+ contents, omode="ab")
+ except:
+ errorlist.append([source,
+ "failed write to file %s" % ent['filename']])
+
+ return errorlist
+
+
+def find_apt_mirror(cloud, cfg):
+ """ find an apt_mirror given the cloud and cfg provided """
+
+ mirror = None
+
+ cfg_mirror = cfg.get("apt_mirror", None)
+ if cfg_mirror:
+ mirror = cfg["apt_mirror"]
+ elif "apt_mirror_search" in cfg:
+ mirror = util.search_for_mirror(cfg['apt_mirror_search'])
+ else:
+ mirror = cloud.get_local_mirror()
+
+ mydom = ""
+
+ doms = []
+
+ if not mirror:
+ # if we have a fqdn, then search its domain portion first
+ (_hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ mydom = ".".join(fqdn.split(".")[1:])
+ if mydom:
+ doms.append(".%s" % mydom)
+
+ if not mirror:
+ doms.extend((".localdomain", "",))
+
+ mirror_list = []
+ distro = cloud.distro.name
+ mirrorfmt = "http://%s-mirror%s/%s" % (distro, "%s", distro)
+ for post in doms:
+ mirror_list.append(mirrorfmt % (post))
+
+ mirror = util.search_for_mirror(mirror_list)
+
+ if not mirror:
+ mirror = cloud.distro.get_package_mirror()
+
+ return mirror
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
new file mode 100644
index 00000000..bae1ea54
--- /dev/null
+++ b/cloudinit/config/cc_bootcmd.py
@@ -0,0 +1,55 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+
+
+def handle(name, cfg, cloud, log, _args):
+
+ if "bootcmd" not in cfg:
+ log.debug(("Skipping module named %s,"
+ " no 'bootcmd' key in configuration"), name)
+ return
+
+ with util.ExtendedTemporaryFile(suffix=".sh") as tmpf:
+ try:
+ content = util.shellify(cfg["bootcmd"])
+ tmpf.write(content)
+ tmpf.flush()
+ except:
+ util.logexc(log, "Failed to shellify bootcmd")
+ raise
+
+ try:
+ env = os.environ.copy()
+ iid = cloud.get_instance_id()
+ if iid:
+ env['INSTANCE_ID'] = str(iid)
+ cmd = ['/bin/sh', tmpf.name]
+ util.subp(cmd, env=env, capture=False)
+ except:
+ util.logexc(log,
+ ("Failed to run bootcmd module %s"), name)
+ raise
diff --git a/cloudinit/CloudConfig/cc_byobu.py b/cloudinit/config/cc_byobu.py
index e821b261..4e2e06bb 100644
--- a/cloudinit/CloudConfig/cc_byobu.py
+++ b/cloudinit/config/cc_byobu.py
@@ -18,18 +18,19 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import subprocess
-import traceback
+from cloudinit import util
+distros = ['ubuntu', 'debian']
-def handle(_name, cfg, _cloud, log, args):
+
+def handle(name, cfg, _cloud, log, args):
if len(args) != 0:
value = args[0]
else:
value = util.get_cfg_option_str(cfg, "byobu_by_default", "")
if not value:
+ log.debug("Skipping module named %s, no 'byobu' values found", name)
return
if value == "user" or value == "system":
@@ -38,7 +39,7 @@ def handle(_name, cfg, _cloud, log, args):
valid = ("enable-user", "enable-system", "enable",
"disable-user", "disable-system", "disable")
if not value in valid:
- log.warn("Unknown value %s for byobu_by_default" % value)
+ log.warn("Unknown value %s for byobu_by_default", value)
mod_user = value.endswith("-user")
mod_sys = value.endswith("-system")
@@ -65,13 +66,6 @@ def handle(_name, cfg, _cloud, log, args):
cmd = ["/bin/sh", "-c", "%s %s %s" % ("X=0;", shcmd, "exit $X")]
- log.debug("setting byobu to %s" % value)
+ log.debug("Setting byobu to %s", value)
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError as e:
- log.debug(traceback.format_exc(e))
- raise Exception("Cmd returned %s: %s" % (e.returncode, cmd))
- except OSError as e:
- log.debug(traceback.format_exc(e))
- raise Exception("Cmd failed to execute: %s" % (cmd))
+ util.subp(cmd, capture=False)
diff --git a/cloudinit/CloudConfig/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 3af6238a..dc046bda 100644
--- a/cloudinit/CloudConfig/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -13,25 +13,27 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
import os
-from subprocess import check_call
-from cloudinit.util import (write_file, get_cfg_option_list_or_str,
- delete_dir_contents, subp)
+
+from cloudinit import util
CA_CERT_PATH = "/usr/share/ca-certificates/"
CA_CERT_FILENAME = "cloud-init-ca-certs.crt"
CA_CERT_CONFIG = "/etc/ca-certificates.conf"
CA_CERT_SYSTEM_PATH = "/etc/ssl/certs/"
+distros = ['ubuntu', 'debian']
+
def update_ca_certs():
"""
Updates the CA certificate cache on the current machine.
"""
- check_call(["update-ca-certificates"])
+ util.subp(["update-ca-certificates"], capture=False)
-def add_ca_certs(certs):
+def add_ca_certs(paths, certs):
"""
Adds certificates to the system. To actually apply the new certificates
you must also call L{update_ca_certs}.
@@ -39,26 +41,29 @@ def add_ca_certs(certs):
@param certs: A list of certificate strings.
"""
if certs:
- cert_file_contents = "\n".join(certs)
+ # First ensure they are strings...
+ cert_file_contents = "\n".join([str(c) for c in certs])
cert_file_fullpath = os.path.join(CA_CERT_PATH, CA_CERT_FILENAME)
- write_file(cert_file_fullpath, cert_file_contents, mode=0644)
+ cert_file_fullpath = paths.join(False, cert_file_fullpath)
+ util.write_file(cert_file_fullpath, cert_file_contents, mode=0644)
# Append cert filename to CA_CERT_CONFIG file.
- write_file(CA_CERT_CONFIG, "\n%s" % CA_CERT_FILENAME, omode="a")
+ util.write_file(paths.join(False, CA_CERT_CONFIG),
+ "\n%s" % CA_CERT_FILENAME, omode="ab")
-def remove_default_ca_certs():
+def remove_default_ca_certs(paths):
"""
Removes all default trusted CA certificates from the system. To actually
apply the change you must also call L{update_ca_certs}.
"""
- delete_dir_contents(CA_CERT_PATH)
- delete_dir_contents(CA_CERT_SYSTEM_PATH)
- write_file(CA_CERT_CONFIG, "", mode=0644)
+ util.delete_dir_contents(paths.join(False, CA_CERT_PATH))
+ util.delete_dir_contents(paths.join(False, CA_CERT_SYSTEM_PATH))
+ util.write_file(paths.join(False, CA_CERT_CONFIG), "", mode=0644)
debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
- subp(('debconf-set-selections', '-'), debconf_sel)
+ util.subp(('debconf-set-selections', '-'), debconf_sel)
-def handle(_name, cfg, _cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
"""
Call to handle ca-cert sections in cloud-config file.
@@ -70,21 +75,25 @@ def handle(_name, cfg, _cloud, log, _args):
"""
# If there isn't a ca-certs section in the configuration don't do anything
if "ca-certs" not in cfg:
+ log.debug(("Skipping module named %s,"
+ " no 'ca-certs' key in configuration"), name)
return
+
ca_cert_cfg = cfg['ca-certs']
# If there is a remove-defaults option set to true, remove the system
# default trusted CA certs first.
if ca_cert_cfg.get("remove-defaults", False):
- log.debug("removing default certificates")
- remove_default_ca_certs()
+ log.debug("Removing default certificates")
+ remove_default_ca_certs(cloud.paths)
# If we are given any new trusted CA certs to add, add them.
if "trusted" in ca_cert_cfg:
- trusted_certs = get_cfg_option_list_or_str(ca_cert_cfg, "trusted")
+ trusted_certs = util.get_cfg_option_list(ca_cert_cfg, "trusted")
if trusted_certs:
- log.debug("adding %d certificates" % len(trusted_certs))
- add_ca_certs(trusted_certs)
+ log.debug("Adding %d certificates" % len(trusted_certs))
+ add_ca_certs(cloud.paths, trusted_certs)
# Update the system with the new cert configuration.
+ log.debug("Updating certificates")
update_ca_certs()
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
new file mode 100644
index 00000000..6f568261
--- /dev/null
+++ b/cloudinit/config/cc_chef.py
@@ -0,0 +1,129 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Avishai Ish-Shalom <avishai@fewbytes.com>
+# Author: Mike Moulton <mike@meltmedia.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import os
+
+from cloudinit import templater
+from cloudinit import util
+
+RUBY_VERSION_DEFAULT = "1.8"
+
+
+def handle(name, cfg, cloud, log, _args):
+
+ # If there isn't a chef key in the configuration don't do anything
+ if 'chef' not in cfg:
+ log.debug(("Skipping module named %s,"
+ " no 'chef' key in configuration"), name)
+ return
+ chef_cfg = cfg['chef']
+
+ # Ensure the chef directories we use exist
+ c_dirs = [
+ '/etc/chef',
+ '/var/log/chef',
+ '/var/lib/chef',
+ '/var/cache/chef',
+ '/var/backups/chef',
+ '/var/run/chef',
+ ]
+ for d in c_dirs:
+ util.ensure_dir(cloud.paths.join(False, d))
+
+ # Set the validation key based on the presence of either 'validation_key'
+ # or 'validation_cert'. In the case where both exist, 'validation_key'
+ # takes precedence
+ for key in ('validation_key', 'validation_cert'):
+ if key in chef_cfg and chef_cfg[key]:
+ v_fn = cloud.paths.join(False, '/etc/chef/validation.pem')
+ util.write_file(v_fn, chef_cfg[key])
+ break
+
+ # Create the chef config from template
+ template_fn = cloud.get_template_filename('chef_client.rb')
+ if template_fn:
+ iid = str(cloud.datasource.get_instance_id())
+ params = {
+ 'server_url': chef_cfg['server_url'],
+ 'node_name': util.get_cfg_option_str(chef_cfg, 'node_name', iid),
+ 'environment': util.get_cfg_option_str(chef_cfg, 'environment',
+ '_default'),
+ 'validation_name': chef_cfg['validation_name']
+ }
+ out_fn = cloud.paths.join(False, '/etc/chef/client.rb')
+ templater.render_to_file(template_fn, out_fn, params)
+ else:
+ log.warn("No template found, not rendering to /etc/chef/client.rb")
+
+ # set the firstboot json
+ initial_json = {}
+ if 'run_list' in chef_cfg:
+ initial_json['run_list'] = chef_cfg['run_list']
+ if 'initial_attributes' in chef_cfg:
+ initial_attributes = chef_cfg['initial_attributes']
+ for k in list(initial_attributes.keys()):
+ initial_json[k] = initial_attributes[k]
+ firstboot_fn = cloud.paths.join(False, '/etc/chef/firstboot.json')
+ util.write_file(firstboot_fn, json.dumps(initial_json))
+
+ # If chef is not installed, we install chef based on 'install_type'
+ if not os.path.isfile('/usr/bin/chef-client'):
+ install_type = util.get_cfg_option_str(chef_cfg, 'install_type',
+ 'packages')
+ if install_type == "gems":
+ # this will install and run the chef-client from gems
+ chef_version = util.get_cfg_option_str(chef_cfg, 'version', None)
+ ruby_version = util.get_cfg_option_str(chef_cfg, 'ruby_version',
+ RUBY_VERSION_DEFAULT)
+            install_chef_from_gems(ruby_version, chef_version, cloud.distro)
+ # and finally, run chef-client
+ log.debug('Running chef-client')
+ util.subp(['/usr/bin/chef-client',
+ '-d', '-i', '1800', '-s', '20'], capture=False)
+ elif install_type == 'packages':
+ # this will install and run the chef-client from packages
+ cloud.distro.install_packages(('chef',))
+ else:
+ log.warn("Unknown chef install type %s", install_type)
+
+
+def get_ruby_packages(version):
+ # return a list of packages needed to install ruby at version
+ pkgs = ['ruby%s' % version, 'ruby%s-dev' % version]
+ if version == "1.8":
+ pkgs.extend(('libopenssl-ruby1.8', 'rubygems1.8'))
+ return pkgs
+
+
+def install_chef_from_gems(ruby_version, chef_version, distro):
+ distro.install_packages(get_ruby_packages(ruby_version))
+ if not os.path.exists('/usr/bin/gem'):
+ util.sym_link('/usr/bin/gem%s' % ruby_version, '/usr/bin/gem')
+ if not os.path.exists('/usr/bin/ruby'):
+ util.sym_link('/usr/bin/ruby%s' % ruby_version, '/usr/bin/ruby')
+ if chef_version:
+ util.subp(['/usr/bin/gem', 'install', 'chef',
+ '-v %s' % chef_version, '--no-ri',
+ '--no-rdoc', '--bindir', '/usr/bin', '-q'], capture=False)
+ else:
+ util.subp(['/usr/bin/gem', 'install', 'chef',
+ '--no-ri', '--no-rdoc', '--bindir',
+ '/usr/bin', '-q'], capture=False)
diff --git a/cloudinit/config/cc_disable_ec2_metadata.py b/cloudinit/config/cc_disable_ec2_metadata.py
new file mode 100644
index 00000000..3fd2c20f
--- /dev/null
+++ b/cloudinit/config/cc_disable_ec2_metadata.py
@@ -0,0 +1,36 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import util
+
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+
+REJECT_CMD = ['route', 'add', '-host', '169.254.169.254', 'reject']
+
+
+def handle(name, cfg, _cloud, log, _args):
+ disabled = util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False)
+ if disabled:
+ util.subp(REJECT_CMD, capture=False)
+ else:
+ log.debug(("Skipping module named %s,"
+ " disabling the ec2 route not enabled"), name)
diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
new file mode 100644
index 00000000..b1caca47
--- /dev/null
+++ b/cloudinit/config/cc_final_message.py
@@ -0,0 +1,68 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import templater
+from cloudinit import util
+from cloudinit import version
+
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+
+FINAL_MESSAGE_DEF = ("Cloud-init v. {{version}} finished at {{timestamp}}."
+ " Up {{uptime}} seconds.")
+
+
+def handle(_name, cfg, cloud, log, args):
+
+ msg_in = None
+ if len(args) != 0:
+ msg_in = args[0]
+ else:
+ msg_in = util.get_cfg_option_str(cfg, "final_message")
+
+ if not msg_in:
+ template_fn = cloud.get_template_filename('final_message')
+ if template_fn:
+ msg_in = util.load_file(template_fn)
+
+ if not msg_in:
+ msg_in = FINAL_MESSAGE_DEF
+
+ uptime = util.uptime()
+ ts = util.time_rfc2822()
+ cver = version.version_string()
+ try:
+ subs = {
+ 'uptime': uptime,
+ 'timestamp': ts,
+ 'version': cver,
+ }
+ util.multi_log("%s\n" % (templater.render_string(msg_in, subs)),
+ console=False, stderr=True)
+ except Exception:
+ util.logexc(log, "Failed to render final message template")
+
+ boot_fin_fn = cloud.paths.boot_finished
+ try:
+ contents = "%s - %s - v. %s\n" % (uptime, ts, cver)
+ util.write_file(boot_fin_fn, contents)
+ except:
+ util.logexc(log, "Failed to write boot finished file %s", boot_fin_fn)
diff --git a/cloudinit/config/cc_foo.py b/cloudinit/config/cc_foo.py
new file mode 100644
index 00000000..95aab4dd
--- /dev/null
+++ b/cloudinit/config/cc_foo.py
@@ -0,0 +1,52 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit.settings import PER_INSTANCE
+
+# Modules are expected to have the following attributes.
+# 1. A required 'handle' method which takes the following params.
+# a) The name will not be this files name, but instead
+# the name specified in configuration (which is the name
+# which will be used to find this module).
+# b) A configuration object that is the result of the merging
+# of cloud configs configuration with legacy configuration
+# as well as any datasource provided configuration
+# c) A cloud object that can be used to access various
+# datasource and paths for the given distro and data provided
+# by the various datasource instance types.
+# d) A argument list that may or may not be empty to this module.
+# Typically those are from module configuration where the module
+# is defined with some extra configuration that will eventually
+# be translated from yaml into arguments to this module.
+# 2. A optional 'frequency' that defines how often this module should be ran.
+# Typically one of PER_INSTANCE, PER_ALWAYS, PER_ONCE. If not
+# provided PER_INSTANCE will be assumed.
+# See settings.py for these constants.
+# 3. A optional 'distros' array/set/tuple that defines the known distros
+# this module will work with (if not all of them). This is used to write
+# a warning out if a module is being ran on a untested distribution for
+# informational purposes. If non existent all distros are assumed and
+# no warning occurs.
+
+frequency = PER_INSTANCE
+
+
+def handle(name, _cfg, _cloud, log, _args):
+ log.debug("Hi from module %s", name)
diff --git a/cloudinit/CloudConfig/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index 9f3a7eaf..b3ce6fb6 100644
--- a/cloudinit/CloudConfig/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -18,10 +18,12 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import traceback
import os
+from cloudinit import util
+
+distros = ['ubuntu', 'debian']
+
def handle(_name, cfg, _cloud, log, _args):
idevs = None
@@ -35,14 +37,14 @@ def handle(_name, cfg, _cloud, log, _args):
if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
(os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
- if idevs == None:
+ if idevs is None:
idevs = ""
- if idevs_empty == None:
+ if idevs_empty is None:
idevs_empty = "true"
else:
- if idevs_empty == None:
+ if idevs_empty is None:
idevs_empty = "false"
- if idevs == None:
+ if idevs is None:
idevs = "/dev/sda"
for dev in ("/dev/sda", "/dev/vda", "/dev/sda1", "/dev/vda1"):
if os.path.exists(dev):
@@ -52,13 +54,14 @@ def handle(_name, cfg, _cloud, log, _args):
# now idevs and idevs_empty are set to determined values
# or, those set by user
- dconf_sel = "grub-pc grub-pc/install_devices string %s\n" % idevs + \
- "grub-pc grub-pc/install_devices_empty boolean %s\n" % idevs_empty
- log.debug("setting grub debconf-set-selections with '%s','%s'" %
+ dconf_sel = (("grub-pc grub-pc/install_devices string %s\n"
+ "grub-pc grub-pc/install_devices_empty boolean %s\n") %
+ (idevs, idevs_empty))
+
+ log.debug("Setting grub debconf-set-selections with '%s','%s'" %
(idevs, idevs_empty))
try:
- util.subp(('debconf-set-selections'), dconf_sel)
+ util.subp(['debconf-set-selections'], dconf_sel)
except:
- log.error("Failed to run debconf-set-selections for grub-dpkg")
- log.debug(traceback.format_exc())
+ util.logexc(log, "Failed to run debconf-set-selections for grub-dpkg")
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
new file mode 100644
index 00000000..ed7af690
--- /dev/null
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -0,0 +1,53 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit.settings import PER_INSTANCE
+from cloudinit import util
+
+frequency = PER_INSTANCE
+
+# This is a tool that cloud init provides
+HELPER_TOOL = '/usr/lib/cloud-init/write-ssh-key-fingerprints'
+
+
+def handle(name, cfg, _cloud, log, _args):
+ if not os.path.exists(HELPER_TOOL):
+ log.warn(("Unable to activate module %s,"
+ " helper tool not found at %s"), name, HELPER_TOOL)
+ return
+
+ fp_blacklist = util.get_cfg_option_list(cfg,
+ "ssh_fp_console_blacklist", [])
+ key_blacklist = util.get_cfg_option_list(cfg,
+ "ssh_key_console_blacklist",
+ ["ssh-dss"])
+
+ try:
+ cmd = [HELPER_TOOL]
+ cmd.append(','.join(fp_blacklist))
+ cmd.append(','.join(key_blacklist))
+ (stdout, _stderr) = util.subp(cmd)
+ util.multi_log("%s\n" % (stdout.strip()),
+ stderr=False, console=True)
+ except:
+ log.warn("Writing keys to the system console failed!")
+ raise
diff --git a/cloudinit/CloudConfig/cc_landscape.py b/cloudinit/config/cc_landscape.py
index a4113cbe..906a6ff7 100644
--- a/cloudinit/CloudConfig/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -19,16 +19,23 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import os.path
-from cloudinit.CloudConfig import per_instance
+
+from StringIO import StringIO
+
from configobj import ConfigObj
-frequency = per_instance
+from cloudinit import util
+
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+LSC_CLIENT_CFG_FILE = "/etc/landscape/client.conf"
-lsc_client_cfg_file = "/etc/landscape/client.conf"
+distros = ['ubuntu']
# defaults taken from stock client.conf in landscape-client 11.07.1.1-0ubuntu2
-lsc_builtincfg = {
+LSC_BUILTIN_CFG = {
'client': {
'log_level': "info",
'url': "https://landscape.canonical.com/message-system",
@@ -38,7 +45,7 @@ lsc_builtincfg = {
}
-def handle(_name, cfg, _cloud, log, _args):
+def handle(_name, cfg, cloud, log, _args):
"""
Basically turn a top level 'landscape' entry with a 'client' dict
and render it to ConfigObj format under '[client]' section in
@@ -47,27 +54,40 @@ def handle(_name, cfg, _cloud, log, _args):
ls_cloudcfg = cfg.get("landscape", {})
- if not isinstance(ls_cloudcfg, dict):
- raise(Exception("'landscape' existed in config, but not a dict"))
+ if not isinstance(ls_cloudcfg, (dict)):
+ raise RuntimeError(("'landscape' key existed in config,"
+ " but not a dictionary type,"
+                            " is a %s instead") % util.obj_name(ls_cloudcfg))
- merged = mergeTogether([lsc_builtincfg, lsc_client_cfg_file, ls_cloudcfg])
+ merge_data = [
+ LSC_BUILTIN_CFG,
+ cloud.paths.join(True, LSC_CLIENT_CFG_FILE),
+ ls_cloudcfg,
+ ]
+ merged = merge_together(merge_data)
- if not os.path.isdir(os.path.dirname(lsc_client_cfg_file)):
- os.makedirs(os.path.dirname(lsc_client_cfg_file))
+ lsc_client_fn = cloud.paths.join(False, LSC_CLIENT_CFG_FILE)
+ lsc_dir = cloud.paths.join(False, os.path.dirname(lsc_client_fn))
+ if not os.path.isdir(lsc_dir):
+ util.ensure_dir(lsc_dir)
- with open(lsc_client_cfg_file, "w") as fp:
- merged.write(fp)
+ contents = StringIO()
+ merged.write(contents)
+ contents.flush()
- log.debug("updated %s" % lsc_client_cfg_file)
+ util.write_file(lsc_client_fn, contents.getvalue())
+ log.debug("Wrote landscape config file to %s", lsc_client_fn)
-def mergeTogether(objs):
+def merge_together(objs):
"""
merge together ConfigObj objects or things that ConfigObj() will take in
later entries override earlier
"""
cfg = ConfigObj({})
for obj in objs:
+ if not obj:
+ continue
if isinstance(obj, ConfigObj):
cfg.merge(obj)
else:
diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
new file mode 100644
index 00000000..6feaae9d
--- /dev/null
+++ b/cloudinit/config/cc_locale.py
@@ -0,0 +1,37 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import util
+
+
+def handle(name, cfg, cloud, log, args):
+ if len(args) != 0:
+ locale = args[0]
+ else:
+ locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
+
+ if not locale:
+ log.debug(("Skipping module named %s, "
+ "no 'locale' configuration found"), name)
+ return
+
+ log.debug("Setting locale to %s", locale)
+ locale_cfgfile = util.get_cfg_option_str(cfg, "locale_configfile")
+ cloud.distro.apply_locale(locale, locale_cfgfile)
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
new file mode 100644
index 00000000..2acdbc6f
--- /dev/null
+++ b/cloudinit/config/cc_mcollective.py
@@ -0,0 +1,91 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Marc Cluet <marc.cluet@canonical.com>
+# Based on code by Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from StringIO import StringIO
+
+# Used since this can maintain comments
+# and doesn't need a top level section
+from configobj import ConfigObj
+
+from cloudinit import util
+
+PUBCERT_FILE = "/etc/mcollective/ssl/server-public.pem"
+PRICERT_FILE = "/etc/mcollective/ssl/server-private.pem"
+
+
+def handle(name, cfg, cloud, log, _args):
+
+ # If there isn't a mcollective key in the configuration don't do anything
+ if 'mcollective' not in cfg:
+ log.debug(("Skipping module named %s, "
+ "no 'mcollective' key in configuration"), name)
+ return
+
+ mcollective_cfg = cfg['mcollective']
+
+ # Start by installing the mcollective package ...
+ cloud.distro.install_packages(("mcollective",))
+
+ # ... and then update the mcollective configuration
+ if 'conf' in mcollective_cfg:
+ # Read server.cfg values from the
+ # original file in order to be able to mix the rest up
+ server_cfg_fn = cloud.paths.join(True, '/etc/mcollective/server.cfg')
+ mcollective_config = ConfigObj(server_cfg_fn)
+ # See: http://tiny.cc/jh9agw
+ for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems():
+ if cfg_name == 'public-cert':
+ pubcert_fn = cloud.paths.join(True, PUBCERT_FILE)
+ util.write_file(pubcert_fn, cfg, mode=0644)
+ mcollective_config['plugin.ssl_server_public'] = pubcert_fn
+ mcollective_config['securityprovider'] = 'ssl'
+ elif cfg_name == 'private-cert':
+ pricert_fn = cloud.paths.join(True, PRICERT_FILE)
+ util.write_file(pricert_fn, cfg, mode=0600)
+ mcollective_config['plugin.ssl_server_private'] = pricert_fn
+ mcollective_config['securityprovider'] = 'ssl'
+ else:
+ if isinstance(cfg, (basestring, str)):
+ # Just set it in the 'main' section
+ mcollective_config[cfg_name] = cfg
+ elif isinstance(cfg, (dict)):
+                    # Iterate through the config items, create a section
+ # if it is needed and then add/or create items as needed
+ if cfg_name not in mcollective_config.sections:
+ mcollective_config[cfg_name] = {}
+ for (o, v) in cfg.iteritems():
+ mcollective_config[cfg_name][o] = v
+ else:
+ # Otherwise just try to convert it to a string
+ mcollective_config[cfg_name] = str(cfg)
+ # We got all our config as wanted we'll rename
+ # the previous server.cfg and create our new one
+ old_fn = cloud.paths.join(False, '/etc/mcollective/server.cfg.old')
+ util.rename(server_cfg_fn, old_fn)
+ # Now we got the whole file, write to disk...
+ contents = StringIO()
+ mcollective_config.write(contents)
+ contents = contents.getvalue()
+ server_cfg_rw = cloud.paths.join(False, '/etc/mcollective/server.cfg')
+ util.write_file(server_cfg_rw, contents, mode=0644)
+
+ # Start mcollective
+ util.subp(['service', 'mcollective', 'start'], capture=False)
diff --git a/cloudinit/CloudConfig/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 6cdd74e8..d3dcf7af 100644
--- a/cloudinit/CloudConfig/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -18,11 +18,17 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import os
-import re
from string import whitespace # pylint: disable=W0402
+import re
+
+from cloudinit import util
+
+# Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1
+SHORTNAME_FILTER = r"^[x]{0,1}[shv]d[a-z][0-9]*$"
+SHORTNAME = re.compile(SHORTNAME_FILTER)
+WS = re.compile("[%s]+" % (whitespace))
+
def is_mdname(name):
# return true if this is a metadata service name
@@ -49,38 +55,46 @@ def handle(_name, cfg, cloud, log, _args):
if "mounts" in cfg:
cfgmnt = cfg["mounts"]
- # shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1
- shortname_filter = r"^[x]{0,1}[shv]d[a-z][0-9]*$"
- shortname = re.compile(shortname_filter)
-
for i in range(len(cfgmnt)):
# skip something that wasn't a list
if not isinstance(cfgmnt[i], list):
+ log.warn("Mount option %s not a list, got a %s instead",
+ (i + 1), util.obj_name(cfgmnt[i]))
continue
+ startname = str(cfgmnt[i][0])
+ log.debug("Attempting to determine the real name of %s", startname)
+
# workaround, allow user to specify 'ephemeral'
# rather than more ec2 correct 'ephemeral0'
- if cfgmnt[i][0] == "ephemeral":
+ if startname == "ephemeral":
cfgmnt[i][0] = "ephemeral0"
+ log.debug(("Adjusted mount option %s "
+ "name from ephemeral to ephemeral0"), (i + 1))
- if is_mdname(cfgmnt[i][0]):
- newname = cloud.device_name_to_device(cfgmnt[i][0])
+ if is_mdname(startname):
+ newname = cloud.device_name_to_device(startname)
if not newname:
- log.debug("ignoring nonexistant named mount %s" % cfgmnt[i][0])
+ log.debug("Ignoring nonexistant named mount %s", startname)
cfgmnt[i][1] = None
else:
- if newname.startswith("/"):
- cfgmnt[i][0] = newname
- else:
- cfgmnt[i][0] = "/dev/%s" % newname
+ renamed = newname
+ if not newname.startswith("/"):
+ renamed = "/dev/%s" % newname
+ cfgmnt[i][0] = renamed
+ log.debug("Mapped metadata name %s to %s", startname, renamed)
else:
- if shortname.match(cfgmnt[i][0]):
- cfgmnt[i][0] = "/dev/%s" % cfgmnt[i][0]
+ if SHORTNAME.match(startname):
+ renamed = "/dev/%s" % startname
+ log.debug("Mapped shortname name %s to %s", startname, renamed)
+ cfgmnt[i][0] = renamed
# in case the user did not quote a field (likely fs-freq, fs_passno)
# but do not convert None to 'None' (LP: #898365)
for j in range(len(cfgmnt[i])):
- if isinstance(cfgmnt[i][j], int):
+            if cfgmnt[i][j] is None:
+                continue
+            else:
cfgmnt[i][j] = str(cfgmnt[i][j])
for i in range(len(cfgmnt)):
@@ -102,14 +116,18 @@ def handle(_name, cfg, cloud, log, _args):
# for each of the "default" mounts, add them only if no other
# entry has the same device name
for defmnt in defmnts:
- devname = cloud.device_name_to_device(defmnt[0])
+ startname = defmnt[0]
+ devname = cloud.device_name_to_device(startname)
if devname is None:
+ log.debug("Ignoring nonexistant named default mount %s", startname)
continue
if devname.startswith("/"):
defmnt[0] = devname
else:
defmnt[0] = "/dev/%s" % devname
+ log.debug("Mapped default device %s to %s", startname, defmnt[0])
+
cfgmnt_has = False
for cfgm in cfgmnt:
if cfgm[0] == defmnt[0]:
@@ -117,14 +135,22 @@ def handle(_name, cfg, cloud, log, _args):
break
if cfgmnt_has:
+ log.debug(("Not including %s, already"
+ " previously included"), startname)
continue
cfgmnt.append(defmnt)
# now, each entry in the cfgmnt list has all fstab values
# if the second field is None (not the string, the value) we skip it
- actlist = [x for x in cfgmnt if x[1] is not None]
+ actlist = []
+ for x in cfgmnt:
+ if x[1] is None:
+ log.debug("Skipping non-existent device named %s", x[0])
+ else:
+ actlist.append(x)
if len(actlist) == 0:
+ log.debug("No modifications to fstab needed.")
return
comment = "comment=cloudconfig"
@@ -133,7 +159,7 @@ def handle(_name, cfg, cloud, log, _args):
dirs = []
for line in actlist:
# write 'comment' in the fs_mntops, entry, claiming this
- line[3] = "%s,comment=cloudconfig" % line[3]
+ line[3] = "%s,%s" % (line[3], comment)
if line[2] == "swap":
needswap = True
if line[1].startswith("/"):
@@ -141,11 +167,10 @@ def handle(_name, cfg, cloud, log, _args):
cc_lines.append('\t'.join(line))
fstab_lines = []
- fstab = open("/etc/fstab", "r+")
- ws = re.compile("[%s]+" % whitespace)
- for line in fstab.read().splitlines():
+ fstab = util.load_file(cloud.paths.join(True, "/etc/fstab"))
+ for line in fstab.splitlines():
try:
- toks = ws.split(line)
+ toks = WS.split(line)
if toks[3].find(comment) != -1:
continue
except:
@@ -153,27 +178,23 @@ def handle(_name, cfg, cloud, log, _args):
fstab_lines.append(line)
fstab_lines.extend(cc_lines)
-
- fstab.seek(0)
- fstab.write("%s\n" % '\n'.join(fstab_lines))
- fstab.truncate()
- fstab.close()
+ contents = "%s\n" % ('\n'.join(fstab_lines))
+ util.write_file(cloud.paths.join(False, "/etc/fstab"), contents)
if needswap:
try:
util.subp(("swapon", "-a"))
except:
- log.warn("Failed to enable swap")
+ util.logexc(log, "Activating swap via 'swapon -a' failed")
for d in dirs:
- if os.path.exists(d):
- continue
+ real_dir = cloud.paths.join(False, d)
try:
- os.makedirs(d)
+ util.ensure_dir(real_dir)
except:
- log.warn("Failed to make '%s' config-mount\n", d)
+ util.logexc(log, "Failed to make '%s' config-mount", d)
try:
util.subp(("mount", "-a"))
except:
- log.warn("'mount -a' failed")
+ util.logexc(log, "Activating mounts via 'mount -a' failed")
diff --git a/cloudinit/CloudConfig/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index a7ff74e1..ae1349eb 100644
--- a/cloudinit/CloudConfig/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -17,13 +17,22 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from cloudinit.CloudConfig import per_instance
-import cloudinit.util as util
-from time import sleep
-frequency = per_instance
-post_list_all = ['pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id',
- 'hostname']
+from cloudinit import templater
+from cloudinit import url_helper as uhelp
+from cloudinit import util
+
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+POST_LIST_ALL = [
+ 'pub_key_dsa',
+ 'pub_key_rsa',
+ 'pub_key_ecdsa',
+ 'instance_id',
+ 'hostname'
+]
# phone_home:
@@ -35,29 +44,33 @@ post_list_all = ['pub_key_dsa', 'pub_key_rsa', 'pub_key_ecdsa', 'instance_id',
# url: http://my.foo.bar/$INSTANCE_ID/
# post: [ pub_key_dsa, pub_key_rsa, pub_key_ecdsa, instance_id
#
-def handle(_name, cfg, cloud, log, args):
+def handle(name, cfg, cloud, log, args):
if len(args) != 0:
ph_cfg = util.read_conf(args[0])
else:
if not 'phone_home' in cfg:
+ log.debug(("Skipping module named %s, "
+ "no 'phone_home' configuration found"), name)
return
ph_cfg = cfg['phone_home']
if 'url' not in ph_cfg:
- log.warn("no 'url' token in phone_home")
+ log.warn(("Skipping module named %s, "
+ "no 'url' found in 'phone_home' configuration"), name)
return
url = ph_cfg['url']
post_list = ph_cfg.get('post', 'all')
- tries = ph_cfg.get('tries', 10)
+    tries = ph_cfg.get('tries', 10)
try:
tries = int(tries)
except:
- log.warn("tries is not an integer. using 10")
tries = 10
+ util.logexc(log, ("Configuration entry 'tries'"
+ " is not an integer, using %s instead"), tries)
if post_list == "all":
- post_list = post_list_all
+ post_list = POST_LIST_ALL
all_keys = {}
all_keys['instance_id'] = cloud.get_instance_id()
@@ -69,38 +82,37 @@ def handle(_name, cfg, cloud, log, args):
'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
}
- for n, path in pubkeys.iteritems():
+ for (n, path) in pubkeys.iteritems():
try:
- fp = open(path, "rb")
- all_keys[n] = fp.read()
- fp.close()
+ all_keys[n] = util.load_file(cloud.paths.join(True, path))
except:
- log.warn("%s: failed to open in phone_home" % path)
+ util.logexc(log, ("%s: failed to open, can not"
+ " phone home that data"), path)
submit_keys = {}
for k in post_list:
if k in all_keys:
submit_keys[k] = all_keys[k]
else:
- submit_keys[k] = "N/A"
- log.warn("requested key %s from 'post' list not available")
+ submit_keys[k] = None
+ log.warn(("Requested key %s from 'post'"
+ " configuration list not available"), k)
- url = util.render_string(url, {'INSTANCE_ID': all_keys['instance_id']})
-
- null_exc = object()
- last_e = null_exc
- for i in range(0, tries):
- try:
- util.readurl(url, submit_keys)
- log.debug("succeeded submit to %s on try %i" % (url, i + 1))
- return
- except Exception as e:
- log.debug("failed to post to %s on try %i" % (url, i + 1))
- last_e = e
- sleep(3)
-
- log.warn("failed to post to %s in %i tries" % (url, tries))
- if last_e is not null_exc:
- raise(last_e)
+    # Get them ready to be posted
+ real_submit_keys = {}
+ for (k, v) in submit_keys.iteritems():
+ if v is None:
+ real_submit_keys[k] = 'N/A'
+ else:
+ real_submit_keys[k] = str(v)
- return
+    # In case the url is parameterized
+ url_params = {
+ 'INSTANCE_ID': all_keys['instance_id'],
+ }
+ url = templater.render_string(url, url_params)
+ try:
+ uhelp.readurl(url, data=real_submit_keys, retries=tries, sec_between=3)
+ except:
+ util.logexc(log, ("Failed to post phone home data to"
+ " %s in %s tries"), url, tries)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
new file mode 100644
index 00000000..467c1496
--- /dev/null
+++ b/cloudinit/config/cc_puppet.py
@@ -0,0 +1,113 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from StringIO import StringIO
+
+import os
+import pwd
+import socket
+
+from cloudinit import helpers
+from cloudinit import util
+
+
+def handle(name, cfg, cloud, log, _args):
+ # If there isn't a puppet key in the configuration don't do anything
+ if 'puppet' not in cfg:
+ log.debug(("Skipping module named %s,"
+ " no 'puppet' configuration found"), name)
+ return
+
+ puppet_cfg = cfg['puppet']
+
+ # Start by installing the puppet package ...
+ cloud.distro.install_packages(["puppet"])
+
+ # ... and then update the puppet configuration
+ if 'conf' in puppet_cfg:
+ # Add all sections from the conf object to puppet.conf
+ puppet_conf_fn = cloud.paths.join(True, '/etc/puppet/puppet.conf')
+ contents = util.load_file(puppet_conf_fn)
+ # Create object for reading puppet.conf values
+ puppet_config = helpers.DefaultingConfigParser()
+ # Read puppet.conf values from original file in order to be able to
+ # mix the rest up. First clean them up (TODO is this really needed??)
+ cleaned_lines = [i.lstrip() for i in contents.splitlines()]
+ cleaned_contents = '\n'.join(cleaned_lines)
+ puppet_config.readfp(StringIO(cleaned_contents),
+ filename=puppet_conf_fn)
+ for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
+ # Cert configuration is a special case
+ # Dump the puppet master ca certificate in the correct place
+ if cfg_name == 'ca_cert':
+ # Puppet ssl sub-directory isn't created yet
+ # Create it with the proper permissions and ownership
+ pp_ssl_dir = cloud.paths.join(False, '/var/lib/puppet/ssl')
+ util.ensure_dir(pp_ssl_dir, 0771)
+ util.chownbyid(pp_ssl_dir,
+ pwd.getpwnam('puppet').pw_uid, 0)
+ pp_ssl_certs = cloud.paths.join(False,
+ '/var/lib/puppet/ssl/certs/')
+ util.ensure_dir(pp_ssl_certs)
+ util.chownbyid(pp_ssl_certs,
+ pwd.getpwnam('puppet').pw_uid, 0)
+ pp_ssl_ca_certs = cloud.paths.join(False,
+ ('/var/lib/puppet/'
+ 'ssl/certs/ca.pem'))
+ util.write_file(pp_ssl_ca_certs, cfg)
+ util.chownbyid(pp_ssl_ca_certs,
+ pwd.getpwnam('puppet').pw_uid, 0)
+ else:
+ # Iterate through the config items, we'll use ConfigParser.set
+ # to overwrite or create new items as needed
+ for (o, v) in cfg.iteritems():
+ if o == 'certname':
+ # Expand %f as the fqdn
+ # TODO should this use the cloud fqdn??
+ v = v.replace("%f", socket.getfqdn())
+ # Expand %i as the instance id
+ v = v.replace("%i", cloud.get_instance_id())
+ # certname needs to be downcased
+ v = v.lower()
+ puppet_config.set(cfg_name, o, v)
+ # Now that we have all our config as wanted, we'll rename
+ # the previous puppet.conf and create our new one
+ conf_old_fn = cloud.paths.join(False,
+ '/etc/puppet/puppet.conf.old')
+ util.rename(puppet_conf_fn, conf_old_fn)
+ puppet_conf_rw = cloud.paths.join(False, '/etc/puppet/puppet.conf')
+ util.write_file(puppet_conf_rw, puppet_config.stringify())
+
+ # Set puppet to automatically start
+ if os.path.exists('/etc/default/puppet'):
+ util.subp(['sed', '-i',
+ '-e', 's/^START=.*/START=yes/',
+ '/etc/default/puppet'], capture=False)
+ elif os.path.exists('/bin/systemctl'):
+ util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
+ capture=False)
+ elif os.path.exists('/sbin/chkconfig'):
+ util.subp(['/sbin/chkconfig', 'puppet', 'on'], capture=False)
+ else:
+ log.warn(("Sorry we do not know how to enable"
+ " puppet services on this system"))
+
+ # Start puppetd
+ util.subp(['service', 'puppet', 'start'], capture=False)
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
new file mode 100644
index 00000000..69cd8872
--- /dev/null
+++ b/cloudinit/config/cc_resizefs.py
@@ -0,0 +1,140 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import stat
+import time
+
+from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+
+RESIZE_FS_PREFIXES_CMDS = [
+ ('ext', 'resize2fs'),
+ ('xfs', 'xfs_growfs'),
+]
+
+
+def nodeify_path(devpth, where, log):
+ try:
+ st_dev = os.stat(where).st_dev
+ dev = os.makedev(os.major(st_dev), os.minor(st_dev))
+ os.mknod(devpth, 0400 | stat.S_IFBLK, dev)
+ return st_dev
+ except:
+ if util.is_container():
+ log.debug("Inside container, ignoring mknod failure in resizefs")
+ return
+ log.warn("Failed to make device node to resize %s at %s",
+ where, devpth)
+ raise
+
+
+def get_fs_type(st_dev, path, log):
+ try:
+ dev_entries = util.find_devs_with(tag='TYPE', oformat='value',
+ no_cache=True, path=path)
+ if not dev_entries:
+ return None
+ return dev_entries[0].strip()
+ except util.ProcessExecutionError:
+ util.logexc(log, ("Failed to get filesystem type"
+ " of maj=%s, min=%s for path %s"),
+ os.major(st_dev), os.minor(st_dev), path)
+ raise
+
+
+def handle(name, cfg, cloud, log, args):
+ if len(args) != 0:
+ resize_root = args[0]
+ else:
+ resize_root = util.get_cfg_option_str(cfg, "resize_rootfs", True)
+
+ if not util.translate_bool(resize_root):
+ log.debug("Skipping module named %s, resizing disabled", name)
+ return
+
+ # TODO is the directory ok to be used??
+ resize_root_d = util.get_cfg_option_str(cfg, "resize_rootfs_tmp", "/run")
+ resize_root_d = cloud.paths.join(False, resize_root_d)
+ util.ensure_dir(resize_root_d)
+
+ # TODO: allow what is to be resized to be configurable??
+ resize_what = cloud.paths.join(False, "/")
+ with util.ExtendedTemporaryFile(prefix="cloudinit.resizefs.",
+ dir=resize_root_d, delete=True) as tfh:
+ devpth = tfh.name
+
+ # Delete the file so that mknod will work
+ # but don't change the file handle to know that it's
+ # removed so that when a later call that recreates
+ # occurs this temporary file will still benefit from
+ # auto deletion
+ tfh.unlink_now()
+
+ st_dev = nodeify_path(devpth, resize_what, log)
+ fs_type = get_fs_type(st_dev, devpth, log)
+ if not fs_type:
+ log.warn("Could not determine filesystem type of %s", resize_what)
+ return
+
+ resizer = None
+ fstype_lc = fs_type.lower()
+ for (pfix, root_cmd) in RESIZE_FS_PREFIXES_CMDS:
+ if fstype_lc.startswith(pfix):
+ resizer = root_cmd
+ break
+
+ if not resizer:
+ log.warn("Not resizing unknown filesystem type %s for %s",
+ fs_type, resize_what)
+ return
+
+ log.debug("Resizing %s (%s) using %s", resize_what, fs_type, resizer)
+ resize_cmd = [resizer, devpth]
+
+ if resize_root == "noblock":
+ # Fork to a child that will run
+ # the resize command
+ util.fork_cb(do_resize, resize_cmd, log)
+ # Don't delete the file now in the parent
+ tfh.delete = False
+ else:
+ do_resize(resize_cmd, log)
+
+ action = 'Resized'
+ if resize_root == "noblock":
+ action = 'Resizing (via forking)'
+ log.debug("%s root filesystem (type=%s, maj=%i, min=%i, val=%s)",
+ action, fs_type, os.major(st_dev), os.minor(st_dev), resize_root)
+
+
+def do_resize(resize_cmd, log):
+ start = time.time()
+ try:
+ util.subp(resize_cmd)
+ except util.ProcessExecutionError:
+ util.logexc(log, "Failed to resize filesystem (cmd=%s)", resize_cmd)
+ raise
+ tot_time = int(time.time() - start)
+ log.debug("Resizing took %s seconds", tot_time)
+ # TODO: Should we add a fsck check after this to make
+ # sure we didn't corrupt anything?
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
new file mode 100644
index 00000000..7a134569
--- /dev/null
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -0,0 +1,102 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+##
+## The purpose of this script is to allow cloud-init to consume
+## rightscale style userdata. rightscale user data is key-value pairs
+## in a url-query-string like format.
+##
+## for cloud-init support, there will be a key named
+## 'CLOUD_INIT_REMOTE_HOOK'.
+##
+## This cloud-config module will
+## - read the blob of data from raw user data, and parse it as key/value
+## - for each key that is found, download the content to
+## the local instance/scripts directory and set them executable.
+## - the files in that directory will be run by the user-scripts module
+## Therefore, this must run before that.
+##
+##
+
+import os
+
+from cloudinit import url_helper as uhelp
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
+
+from urlparse import parse_qs
+
+frequency = PER_INSTANCE
+
+MY_NAME = "cc_rightscale_userdata"
+MY_HOOKNAME = 'CLOUD_INIT_REMOTE_HOOK'
+
+
+def handle(name, _cfg, cloud, log, _args):
+ try:
+ ud = cloud.get_userdata_raw()
+ except:
+ log.warn("Failed to get raw userdata in module %s", name)
+ return
+
+ try:
+ mdict = parse_qs(ud)
+ if not mdict or not MY_HOOKNAME in mdict:
+ log.debug(("Skipping module %s, "
+ "did not find %s in parsed"
+ " raw userdata"), name, MY_HOOKNAME)
+ return
+ except:
+ util.logexc(log, ("Failed to parse query string %s"
+ " into a dictionary"), ud)
+ raise
+
+ wrote_fns = []
+ captured_excps = []
+
+ # These will eventually then be run by the cc_scripts_user
+ # TODO: maybe this should just be a new user data handler??
+ # Instead of a late module that acts like a user data handler?
+ scripts_d = cloud.get_ipath_cur('scripts')
+ urls = mdict[MY_HOOKNAME]
+ for (i, url) in enumerate(urls):
+ fname = os.path.join(scripts_d, "rightscale-%02i" % (i))
+ try:
+ resp = uhelp.readurl(url)
+ # Ensure it's a valid http response (and something gotten)
+ if resp.ok() and resp.contents:
+ util.write_file(fname, str(resp), mode=0700)
+ wrote_fns.append(fname)
+ except Exception as e:
+ captured_excps.append(e)
+ util.logexc(log, "%s failed to read %s and write %s",
+ MY_NAME, url, fname)
+
+ if wrote_fns:
+ log.debug("Wrote out rightscale userdata to %s files", len(wrote_fns))
+
+ if len(wrote_fns) != len(urls):
+ skipped = len(urls) - len(wrote_fns)
+ log.debug("%s urls were skipped or failed", skipped)
+
+ if captured_excps:
+ log.warn("%s failed with exceptions, re-raising the last one",
+ len(captured_excps))
+ raise captured_excps[-1]
diff --git a/cloudinit/CloudConfig/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index ac7f2c74..78327526 100644
--- a/cloudinit/CloudConfig/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -18,16 +18,15 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit
-import logging
-import cloudinit.util as util
-import traceback
+import os
+
+from cloudinit import util
DEF_FILENAME = "20-cloud-config.conf"
DEF_DIR = "/etc/rsyslog.d"
-def handle(_name, cfg, _cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
# rsyslog:
# - "*.* @@192.158.1.1"
# - content: "*.* @@192.0.2.1:10514"
@@ -37,17 +36,18 @@ def handle(_name, cfg, _cloud, log, _args):
# process 'rsyslog'
if not 'rsyslog' in cfg:
+ log.debug(("Skipping module named %s,"
+ " no 'rsyslog' key in configuration"), name)
return
def_dir = cfg.get('rsyslog_dir', DEF_DIR)
def_fname = cfg.get('rsyslog_filename', DEF_FILENAME)
files = []
- elst = []
- for ent in cfg['rsyslog']:
+ for i, ent in enumerate(cfg['rsyslog']):
if isinstance(ent, dict):
if not "content" in ent:
- elst.append((ent, "no 'content' entry"))
+ log.warn("No 'content' entry in config entry %s", i + 1)
continue
content = ent['content']
filename = ent.get("filename", def_fname)
@@ -55,47 +55,48 @@ def handle(_name, cfg, _cloud, log, _args):
content = ent
filename = def_fname
+ filename = filename.strip()
+ if not filename:
+ log.warn("Entry %s has an empty filename", i + 1)
+ continue
+
if not filename.startswith("/"):
- filename = "%s/%s" % (def_dir, filename)
+ filename = os.path.join(def_dir, filename)
+ # Truncate filename first time you see it
omode = "ab"
- # truncate filename first time you see it
if filename not in files:
omode = "wb"
files.append(filename)
try:
- util.write_file(filename, content + "\n", omode=omode)
- except Exception as e:
- log.debug(traceback.format_exc(e))
- elst.append((content, "failed to write to %s" % filename))
+ contents = "%s\n" % (content)
+ util.write_file(cloud.paths.join(False, filename),
+ contents, omode=omode)
+ except Exception:
+ util.logexc(log, "Failed to write to %s", filename)
- # need to restart syslogd
+ # Attempt to restart syslogd
restarted = False
try:
- # if this config module is running at cloud-init time
+ # If this config module is running at cloud-init time
# (before rsyslog is running) we don't actually have to
# restart syslog.
#
- # upstart actually does what we want here, in that it doesn't
+ # Upstart actually does what we want here, in that it doesn't
# start a service that wasn't running already on 'restart'
# it will also return failure on the attempt, so 'restarted'
- # won't get set
- log.debug("restarting rsyslog")
+ # won't get set.
+ log.debug("Restarting rsyslog")
util.subp(['service', 'rsyslog', 'restart'])
restarted = True
-
- except Exception as e:
- elst.append(("restart", str(e)))
+ except Exception:
+ util.logexc(log, "Failed restarting rsyslog")
if restarted:
- # this only needs to run if we *actually* restarted
+ # This only needs to run if we *actually* restarted
# syslog above.
- cloudinit.logging_set_from_cfg_file()
- log = logging.getLogger()
- log.debug("rsyslog configured %s" % files)
-
- for e in elst:
- log.warn("rsyslog error: %s\n" % ':'.join(e))
-
- return
+ cloud.cycle_logging()
+ # This should now use rsyslog if
+ # the logging was setup to use it...
+ log.debug("%s configured %s files", name, files)
diff --git a/cloudinit/CloudConfig/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index f7e8c671..65064cfb 100644
--- a/cloudinit/CloudConfig/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -18,15 +18,21 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
+import os
+from cloudinit import util
-def handle(_name, cfg, cloud, log, _args):
+
+def handle(name, cfg, cloud, log, _args):
if "runcmd" not in cfg:
+ log.debug(("Skipping module named %s,"
+ " no 'runcmd' key in configuration"), name)
return
- outfile = "%s/runcmd" % cloud.get_ipath('scripts')
+
+ out_fn = os.path.join(cloud.get_ipath('scripts'), "runcmd")
+ cmd = cfg["runcmd"]
try:
- content = util.shellify(cfg["runcmd"])
- util.write_file(outfile, content, 0700)
+ content = util.shellify(cmd)
+ util.write_file(cloud.paths.join(False, out_fn), content, 0700)
except:
- log.warn("failed to open %s for runcmd" % outfile)
+ util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)
diff --git a/cloudinit/CloudConfig/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 59522ab8..79ed8807 100644
--- a/cloudinit/CloudConfig/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -15,42 +15,46 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
-import os.path
-import subprocess
-import cloudinit.CloudConfig as cc
-import yaml
+from cloudinit import util
-def handle(_name, cfg, _cloud, _log, _args):
+# Note: see http://saltstack.org/topics/installation/
+
+
+def handle(name, cfg, cloud, log, _args):
# If there isn't a salt key in the configuration don't do anything
if 'salt_minion' not in cfg:
+ log.debug(("Skipping module named %s,"
+ " no 'salt_minion' key in configuration"), name)
return
+
salt_cfg = cfg['salt_minion']
+
# Start by installing the salt package ...
- cc.install_packages(("salt-minion",))
- config_dir = '/etc/salt'
- if not os.path.isdir(config_dir):
- os.makedirs(config_dir)
+ cloud.distro.install_packages(["salt-minion"])
+
+ # Ensure we can configure files at the right dir
+ config_dir = cloud.paths.join(False, salt_cfg.get("config_dir",
+ '/etc/salt'))
+ util.ensure_dir(config_dir)
+
# ... and then update the salt configuration
if 'conf' in salt_cfg:
# Add all sections from the conf object to /etc/salt/minion
minion_config = os.path.join(config_dir, 'minion')
- yaml.dump(salt_cfg['conf'],
- file(minion_config, 'w'),
- default_flow_style=False)
+ minion_data = util.yaml_dumps(salt_cfg.get('conf'))
+ util.write_file(minion_config, minion_data)
+
# ... copy the key pair if specified
if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
- pki_dir = '/etc/salt/pki'
- cumask = os.umask(077)
- if not os.path.isdir(pki_dir):
- os.makedirs(pki_dir)
- pub_name = os.path.join(pki_dir, 'minion.pub')
- pem_name = os.path.join(pki_dir, 'minion.pem')
- with open(pub_name, 'w') as f:
- f.write(salt_cfg['public_key'])
- with open(pem_name, 'w') as f:
- f.write(salt_cfg['private_key'])
- os.umask(cumask)
+ pki_dir = cloud.paths.join(False, salt_cfg.get('pki_dir',
+ '/etc/salt/pki'))
+ with util.umask(077):
+ util.ensure_dir(pki_dir)
+ pub_name = os.path.join(pki_dir, 'minion.pub')
+ pem_name = os.path.join(pki_dir, 'minion.pem')
+ util.write_file(pub_name, salt_cfg['public_key'])
+ util.write_file(pem_name, salt_cfg['private_key'])
# Start salt-minion
- subprocess.check_call(['service', 'salt-minion', 'start'])
+ util.subp(['service', 'salt-minion', 'start'], capture=False)
diff --git a/cloudinit/CloudConfig/cc_scripts_per_boot.py b/cloudinit/config/cc_scripts_per_boot.py
index 41a74754..42b987eb 100644
--- a/cloudinit/CloudConfig/cc_scripts_per_boot.py
+++ b/cloudinit/config/cc_scripts_per_boot.py
@@ -18,17 +18,24 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_always
-from cloudinit import get_cpath
+import os
-frequency = per_always
-runparts_path = "%s/%s" % (get_cpath(), "scripts/per-boot")
+from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
-def handle(_name, _cfg, _cloud, log, _args):
+frequency = PER_ALWAYS
+
+SCRIPT_SUBDIR = 'per-boot'
+
+
+def handle(name, _cfg, cloud, log, _args):
+ # Comes from the following:
+ # https://forums.aws.amazon.com/thread.jspa?threadID=96918
+ runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
util.runparts(runparts_path)
except:
- log.warn("failed to run-parts in %s" % runparts_path)
+ log.warn("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
diff --git a/cloudinit/CloudConfig/cc_scripts_per_instance.py b/cloudinit/config/cc_scripts_per_instance.py
index a2981eab..b5d71c13 100644
--- a/cloudinit/CloudConfig/cc_scripts_per_instance.py
+++ b/cloudinit/config/cc_scripts_per_instance.py
@@ -18,17 +18,24 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_instance
-from cloudinit import get_cpath
+import os
-frequency = per_instance
-runparts_path = "%s/%s" % (get_cpath(), "scripts/per-instance")
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
-def handle(_name, _cfg, _cloud, log, _args):
+frequency = PER_INSTANCE
+
+SCRIPT_SUBDIR = 'per-instance'
+
+
+def handle(name, _cfg, cloud, log, _args):
+ # Comes from the following:
+ # https://forums.aws.amazon.com/thread.jspa?threadID=96918
+ runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
util.runparts(runparts_path)
except:
- log.warn("failed to run-parts in %s" % runparts_path)
+ log.warn("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
diff --git a/cloudinit/CloudConfig/cc_scripts_per_once.py b/cloudinit/config/cc_scripts_per_once.py
index a69151da..d77d36d5 100644
--- a/cloudinit/CloudConfig/cc_scripts_per_once.py
+++ b/cloudinit/config/cc_scripts_per_once.py
@@ -18,17 +18,24 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_once
-from cloudinit import get_cpath
+import os
-frequency = per_once
-runparts_path = "%s/%s" % (get_cpath(), "scripts/per-once")
+from cloudinit import util
+from cloudinit.settings import PER_ONCE
-def handle(_name, _cfg, _cloud, log, _args):
+frequency = PER_ONCE
+
+SCRIPT_SUBDIR = 'per-once'
+
+
+def handle(name, _cfg, cloud, log, _args):
+ # Comes from the following:
+ # https://forums.aws.amazon.com/thread.jspa?threadID=96918
+ runparts_path = os.path.join(cloud.get_cpath(), 'scripts', SCRIPT_SUBDIR)
try:
util.runparts(runparts_path)
except:
- log.warn("failed to run-parts in %s" % runparts_path)
+ log.warn("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
diff --git a/cloudinit/CloudConfig/cc_scripts_user.py b/cloudinit/config/cc_scripts_user.py
index 933aa4e0..5c53014f 100644
--- a/cloudinit/CloudConfig/cc_scripts_user.py
+++ b/cloudinit/config/cc_scripts_user.py
@@ -18,17 +18,25 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-from cloudinit.CloudConfig import per_instance
-from cloudinit import get_ipath_cur
+import os
-frequency = per_instance
-runparts_path = "%s/%s" % (get_ipath_cur(), "scripts")
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
-def handle(_name, _cfg, _cloud, log, _args):
+frequency = PER_INSTANCE
+
+SCRIPT_SUBDIR = 'scripts'
+
+
+def handle(name, _cfg, cloud, log, _args):
+ # This is written to by the user data handlers
+ # Ie, any custom shell scripts that come down
+ # go here...
+ runparts_path = os.path.join(cloud.get_ipath_cur(), SCRIPT_SUBDIR)
try:
util.runparts(runparts_path)
except:
- log.warn("failed to run-parts in %s" % runparts_path)
+ log.warn("Failed to run module %s (%s in %s)",
+ name, SCRIPT_SUBDIR, runparts_path)
raise
diff --git a/cloudinit/CloudConfig/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index acea74d9..b0f27ebf 100644
--- a/cloudinit/CloudConfig/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -18,25 +18,18 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
+from cloudinit import util
-def handle(_name, cfg, cloud, log, _args):
+def handle(name, cfg, cloud, log, _args):
if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
- log.debug("preserve_hostname is set. not setting hostname")
- return(True)
+ log.debug(("Configuration option 'preserve_hostname' is set,"
+ " not setting the hostname in module %s"), name)
+ return
(hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
try:
- set_hostname(hostname, log)
+ log.debug("Setting hostname to %s", hostname)
+ cloud.distro.set_hostname(hostname)
except Exception:
- util.logexc(log)
- log.warn("failed to set hostname to %s\n", hostname)
-
- return(True)
-
-
-def set_hostname(hostname, log):
- util.subp(['hostname', hostname])
- util.write_file("/etc/hostname", "%s\n" % hostname, 0644)
- log.debug("populated /etc/hostname with %s on first boot", hostname)
+ util.logexc(log, "Failed to set hostname to %s", hostname)
diff --git a/cloudinit/CloudConfig/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 9d0bbdb8..ab266741 100644
--- a/cloudinit/CloudConfig/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -18,13 +18,19 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
import sys
-import random
+
+from cloudinit import ssh_util
+from cloudinit import util
+
from string import letters, digits # pylint: disable=W0402
+# We are removing certain 'painful' letters/numbers
+PW_SET = (letters.translate(None, 'loLOI') +
+ digits.translate(None, '01'))
+
-def handle(_name, cfg, _cloud, log, args):
+def handle(_name, cfg, cloud, log, args):
if len(args) != 0:
# if run from command line, and give args, wipe the chpasswd['list']
password = args[0]
@@ -62,68 +68,79 @@ def handle(_name, cfg, _cloud, log, args):
ch_in = '\n'.join(plist_in)
try:
+ log.debug("Changing password for %s:", users)
util.subp(['chpasswd'], ch_in)
- log.debug("changed password for %s:" % users)
except Exception as e:
errors.append(e)
- log.warn("failed to set passwords with chpasswd: %s" % e)
+ util.logexc(log,
+ "Failed to set passwords with chpasswd for %s", users)
if len(randlist):
- sys.stdout.write("%s\n%s\n" % ("Set the following passwords\n",
- '\n'.join(randlist)))
+ blurb = ("Set the following 'random' passwords\n",
+ '\n'.join(randlist))
+ sys.stderr.write("%s\n%s\n" % blurb)
if expire:
- enum = len(errors)
+ expired_users = []
for u in users:
try:
util.subp(['passwd', '--expire', u])
+ expired_users.append(u)
except Exception as e:
errors.append(e)
- log.warn("failed to expire account for %s" % u)
- if enum == len(errors):
- log.debug("expired passwords for: %s" % u)
+ util.logexc(log, "Failed to set 'expire' for %s", u)
+ if expired_users:
+ log.debug("Expired passwords for: %s users", expired_users)
+ change_pwauth = False
+ pw_auth = None
if 'ssh_pwauth' in cfg:
- val = str(cfg['ssh_pwauth']).lower()
- if val in ("true", "1", "yes"):
- pw_auth = "yes"
- change_pwauth = True
- elif val in ("false", "0", "no"):
- pw_auth = "no"
- change_pwauth = True
- else:
- change_pwauth = False
+ change_pwauth = True
+ if util.is_true(cfg['ssh_pwauth']):
+ pw_auth = 'yes'
+ if util.is_false(cfg['ssh_pwauth']):
+ pw_auth = 'no'
if change_pwauth:
- pa_s = "\(#*\)\(PasswordAuthentication[[:space:]]\+\)\(yes\|no\)"
- msg = "set PasswordAuthentication to '%s'" % pw_auth
- try:
- cmd = ['sed', '-i', 's,%s,\\2%s,' % (pa_s, pw_auth),
- '/etc/ssh/sshd_config']
- util.subp(cmd)
- log.debug(msg)
- except Exception as e:
- log.warn("failed %s" % msg)
- errors.append(e)
+ replaced_auth = False
+
+ # See: man sshd_config
+ conf_fn = cloud.paths.join(True, ssh_util.DEF_SSHD_CFG)
+ old_lines = ssh_util.parse_ssh_config(conf_fn)
+ new_lines = []
+ i = 0
+ for (i, line) in enumerate(old_lines):
+ # Keywords are case-insensitive and arguments are case-sensitive
+ if line.key == 'passwordauthentication':
+ log.debug("Replacing auth line %s with %s", i + 1, pw_auth)
+ replaced_auth = True
+ line.value = pw_auth
+ new_lines.append(line)
+
+ if not replaced_auth:
+ log.debug("Adding new auth line %s", i + 1)
+ replaced_auth = True
+ new_lines.append(ssh_util.SshdConfigLine('',
+ 'PasswordAuthentication',
+ pw_auth))
+
+ lines = [str(e) for e in new_lines]
+ ssh_rw_fn = cloud.paths.join(False, ssh_util.DEF_SSHD_CFG)
+ util.write_file(ssh_rw_fn, "\n".join(lines))
try:
- p = util.subp(['service', cfg.get('ssh_svcname', 'ssh'),
- 'restart'])
- log.debug("restarted sshd")
+ cmd = ['service']
+ cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
+ cmd.append('restart')
+ util.subp(cmd)
+ log.debug("Restarted the ssh daemon")
except:
- log.warn("restart of ssh failed")
+ util.logexc(log, "Restarting of the ssh daemon failed")
if len(errors):
- raise(errors[0])
-
- return
-
-
-def rand_str(strlen=32, select_from=letters + digits):
- return("".join([random.choice(select_from) for _x in range(0, strlen)]))
+ log.debug("%s errors occured, re-raising the last one", len(errors))
+ raise errors[-1]
def rand_user_password(pwlen=9):
- selfrom = (letters.translate(None, 'loLOI') +
- digits.translate(None, '01'))
- return(rand_str(pwlen, select_from=selfrom))
+ return util.rand_str(pwlen, select_from=PW_SET)
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
new file mode 100644
index 00000000..4019ae90
--- /dev/null
+++ b/cloudinit/config/cc_ssh.py
@@ -0,0 +1,132 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import glob
+
+from cloudinit import util
+from cloudinit import ssh_util
+
+DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding,"
+"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" "
+"rather than the user \\\"root\\\".\';echo;sleep 10\"")
+
+KEY_2_FILE = {
+ "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0600),
+ "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
+ "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0600),
+ "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0644),
+ "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0600),
+ "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644),
+}
+
+PRIV_2_PUB = {
+ 'rsa_private': 'rsa_public',
+ 'dsa_private': 'dsa_public',
+ 'ecdsa_private': 'ecdsa_public',
+}
+
+KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
+
+GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa']
+
+KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
+
+
+def handle(_name, cfg, cloud, log, _args):
+
+ # remove the static keys from the pristine image
+ if cfg.get("ssh_deletekeys", True):
+ key_pth = cloud.paths.join(False, "/etc/ssh/", "ssh_host_*key*")
+ for f in glob.glob(key_pth):
+ try:
+ util.del_file(f)
+ except:
+ util.logexc(log, "Failed deleting key file %s", f)
+
+ if "ssh_keys" in cfg:
+ # if there are keys in cloud-config, use them
+ for (key, val) in cfg["ssh_keys"].iteritems():
+ if key in KEY_2_FILE:
+ tgt_fn = KEY_2_FILE[key][0]
+ tgt_perms = KEY_2_FILE[key][1]
+ util.write_file(cloud.paths.join(False, tgt_fn),
+ val, tgt_perms)
+
+ for (priv, pub) in PRIV_2_PUB.iteritems():
+ if pub in cfg['ssh_keys'] or not priv in cfg['ssh_keys']:
+ continue
+ pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
+ cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
+ try:
+ # TODO: Is this guard needed?
+ with util.SeLinuxGuard("/etc/ssh", recursive=True):
+ util.subp(cmd, capture=False)
+ log.debug("Generated a key for %s from %s", pair[0], pair[1])
+ except:
+ util.logexc(log, ("Failed generated a key"
+ " for %s from %s"), pair[0], pair[1])
+ else:
+ # if not, generate them
+ genkeys = util.get_cfg_option_list(cfg,
+ 'ssh_genkeytypes',
+ GENERATE_KEY_NAMES)
+ for keytype in genkeys:
+ keyfile = cloud.paths.join(False, KEY_FILE_TPL % (keytype))
+ util.ensure_dir(os.path.dirname(keyfile))
+ if not os.path.exists(keyfile):
+ cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
+ try:
+ # TODO: Is this guard needed?
+ with util.SeLinuxGuard("/etc/ssh", recursive=True):
+ util.subp(cmd, capture=False)
+ except:
+ util.logexc(log, ("Failed generating key type"
+ " %s to file %s"), keytype, keyfile)
+
+ try:
+ user = util.get_cfg_option_str(cfg, 'user')
+ disable_root = util.get_cfg_option_bool(cfg, "disable_root", True)
+ disable_root_opts = util.get_cfg_option_str(cfg, "disable_root_opts",
+ DISABLE_ROOT_OPTS)
+
+ keys = cloud.get_public_ssh_keys() or []
+ if "ssh_authorized_keys" in cfg:
+ cfgkeys = cfg["ssh_authorized_keys"]
+ keys.extend(cfgkeys)
+
+ apply_credentials(keys, user, cloud.paths,
+ disable_root, disable_root_opts)
+ except:
+ util.logexc(log, "Applying ssh credentials failed!")
+
+
+def apply_credentials(keys, user, paths, disable_root, disable_root_opts):
+
+ keys = set(keys)
+ if user:
+ ssh_util.setup_user_keys(keys, user, '', paths)
+
+ if disable_root and user:
+ key_prefix = disable_root_opts.replace('$USER', user)
+ else:
+ key_prefix = ''
+
+ ssh_util.setup_user_keys(keys, 'root', key_prefix, paths)
diff --git a/cloudinit/CloudConfig/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index bbf5bd83..c58b28ec 100644
--- a/cloudinit/CloudConfig/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -18,12 +18,14 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import subprocess
-import traceback
+from cloudinit import util
+# The ssh-import-id only seems to exist on ubuntu (for now)
+# https://launchpad.net/ssh-import-id
+distros = ['ubuntu']
-def handle(_name, cfg, _cloud, log, args):
+
+def handle(name, cfg, _cloud, log, args):
if len(args) != 0:
user = args[0]
ids = []
@@ -31,20 +33,21 @@ def handle(_name, cfg, _cloud, log, args):
ids = args[1:]
else:
user = util.get_cfg_option_str(cfg, "user", "ubuntu")
- ids = util.get_cfg_option_list_or_str(cfg, "ssh_import_id", [])
+ ids = util.get_cfg_option_list(cfg, "ssh_import_id", [])
if len(ids) == 0:
+ log.debug("Skipping module named %s, no ids found to import", name)
return
- cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
+ if not user:
+ log.debug("Skipping module named %s, no user found to import", name)
+ return
- log.debug("importing ssh ids. cmd = %s" % cmd)
+ cmd = ["sudo", "-Hu", user, "ssh-import-id"] + ids
+ log.debug("Importing ssh ids for user %s.", user)
try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError as e:
- log.debug(traceback.format_exc(e))
- raise Exception("Cmd returned %s: %s" % (e.returncode, cmd))
- except OSError as e:
- log.debug(traceback.format_exc(e))
- raise Exception("Cmd failed to execute: %s" % (cmd))
+ util.subp(cmd, capture=False)
+ except util.ProcessExecutionError as e:
+ util.logexc(log, "Failed to run command to import %s ssh ids", user)
+ raise e
diff --git a/cloudinit/config/cc_timezone.py b/cloudinit/config/cc_timezone.py
new file mode 100644
index 00000000..b9eb85b2
--- /dev/null
+++ b/cloudinit/config/cc_timezone.py
@@ -0,0 +1,39 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import util
+
+from cloudinit.settings import PER_INSTANCE
+
+frequency = PER_INSTANCE
+
+
+def handle(name, cfg, cloud, log, args):
+ if len(args) != 0:
+ timezone = args[0]
+ else:
+ timezone = util.get_cfg_option_str(cfg, "timezone", False)
+
+ if not timezone:
+ log.debug("Skipping module named %s, no 'timezone' specified", name)
+ return
+
+    # Let the distro handle setting its timezone
+ cloud.distro.set_timezone(timezone)
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
new file mode 100644
index 00000000..c148b12e
--- /dev/null
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -0,0 +1,60 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import util
+from cloudinit import templater
+
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+
+
+def handle(name, cfg, cloud, log, _args):
+ manage_hosts = util.get_cfg_option_str(cfg, "manage_etc_hosts", False)
+ if util.translate_bool(manage_hosts, addons=['template']):
+ (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ if not hostname:
+ log.warn(("Option 'manage_etc_hosts' was set,"
+ " but no hostname was found"))
+ return
+
+ # Render from a template file
+ distro_n = cloud.distro.name
+ tpl_fn_name = cloud.get_template_filename("hosts.%s" % (distro_n))
+ if not tpl_fn_name:
+ raise RuntimeError(("No hosts template could be"
+ " found for distro %s") % (distro_n))
+
+ out_fn = cloud.paths.join(False, '/etc/hosts')
+ templater.render_to_file(tpl_fn_name, out_fn,
+ {'hostname': hostname, 'fqdn': fqdn})
+
+ elif manage_hosts == "localhost":
+ (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ if not hostname:
+ log.warn(("Option 'manage_etc_hosts' was set,"
+ " but no hostname was found"))
+ return
+
+ log.debug("Managing localhost in /etc/hosts")
+ cloud.distro.update_etc_hosts(hostname, fqdn)
+ else:
+ log.debug(("Configuration option 'manage_etc_hosts' is not set,"
+ " not managing /etc/hosts in module %s"), name)
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
new file mode 100644
index 00000000..b84a1a06
--- /dev/null
+++ b/cloudinit/config/cc_update_hostname.py
@@ -0,0 +1,41 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2011 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import util
+from cloudinit.settings import PER_ALWAYS
+
+frequency = PER_ALWAYS
+
+
+def handle(name, cfg, cloud, log, _args):
+ if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
+ log.debug(("Configuration option 'preserve_hostname' is set,"
+ " not updating the hostname in module %s"), name)
+ return
+
+ (hostname, _fqdn) = util.get_hostname_fqdn(cfg, cloud)
+ try:
+ prev_fn = os.path.join(cloud.get_cpath('data'), "previous-hostname")
+ cloud.distro.update_hostname(hostname, prev_fn)
+ except Exception:
+ util.logexc(log, "Failed to set the hostname to %s", hostname)
+ raise
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
new file mode 100644
index 00000000..da4d0180
--- /dev/null
+++ b/cloudinit/distros/__init__.py
@@ -0,0 +1,163 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from StringIO import StringIO
+
+import abc
+
+from cloudinit import importer
+from cloudinit import log as logging
+from cloudinit import util
+
+# TODO: Make this via config??
+IFACE_ACTIONS = {
+ 'up': ['ifup', '--all'],
+ 'down': ['ifdown', '--all'],
+}
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(object):
+
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self, name, cfg, paths):
+ self._paths = paths
+ self._cfg = cfg
+ self.name = name
+
+ @abc.abstractmethod
+ def install_packages(self, pkglist):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def _write_network(self, settings):
+ # In the future use the http://fedorahosted.org/netcf/
+ # to write this blob out in a distro format
+ raise NotImplementedError()
+
+ def get_option(self, opt_name, default=None):
+ return self._cfg.get(opt_name, default)
+
+ @abc.abstractmethod
+ def set_hostname(self, hostname):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def update_hostname(self, hostname, prev_hostname_fn):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def package_command(self, cmd, args=None):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def update_package_sources(self):
+ raise NotImplementedError()
+
+ def get_package_mirror(self):
+ return self.get_option('package_mirror')
+
+ def apply_network(self, settings, bring_up=True):
+ # Write it out
+ self._write_network(settings)
+ # Now try to bring them up
+ if bring_up:
+ return self._interface_action('up')
+ return False
+
+ @abc.abstractmethod
+ def apply_locale(self, locale, out_fn=None):
+ raise NotImplementedError()
+
+ @abc.abstractmethod
+ def set_timezone(self, tz):
+ raise NotImplementedError()
+
+ def _get_localhost_ip(self):
+ return "127.0.0.1"
+
+ def update_etc_hosts(self, hostname, fqdn):
+ # Format defined at
+ # http://unixhelp.ed.ac.uk/CGI/man-cgi?hosts
+ header = "# Added by cloud-init"
+ real_header = "%s on %s" % (header, util.time_rfc2822())
+ local_ip = self._get_localhost_ip()
+ hosts_line = "%s\t%s %s" % (local_ip, fqdn, hostname)
+ new_etchosts = StringIO()
+ need_write = False
+ need_change = True
+ hosts_ro_fn = self._paths.join(True, "/etc/hosts")
+ for line in util.load_file(hosts_ro_fn).splitlines():
+ if line.strip().startswith(header):
+ continue
+ if not line.strip() or line.strip().startswith("#"):
+ new_etchosts.write("%s\n" % (line))
+ continue
+ split_line = [s.strip() for s in line.split()]
+ if len(split_line) < 2:
+ new_etchosts.write("%s\n" % (line))
+ continue
+ (ip, hosts) = split_line[0], split_line[1:]
+ if ip == local_ip:
+ if sorted([hostname, fqdn]) == sorted(hosts):
+ need_change = False
+ if need_change:
+ line = "%s\n%s" % (real_header, hosts_line)
+ need_change = False
+ need_write = True
+ new_etchosts.write("%s\n" % (line))
+ if need_change:
+ new_etchosts.write("%s\n%s\n" % (real_header, hosts_line))
+ need_write = True
+ if need_write:
+ contents = new_etchosts.getvalue()
+ util.write_file(self._paths.join(False, "/etc/hosts"),
+ contents, mode=0644)
+
+ def _interface_action(self, action):
+ if action not in IFACE_ACTIONS:
+ raise NotImplementedError("Unknown interface action %s" % (action))
+ cmd = IFACE_ACTIONS[action]
+ try:
+ LOG.debug("Attempting to run %s interface action using command %s",
+ action, cmd)
+ (_out, err) = util.subp(cmd)
+ if len(err):
+ LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
+ return True
+ except util.ProcessExecutionError:
+ util.logexc(LOG, "Running interface command %s failed", cmd)
+ return False
+
+
+def fetch(name):
+ locs = importer.find_module(name,
+ ['', __name__],
+ ['Distro'])
+ if not locs:
+ raise ImportError("No distribution found for distro %s"
+ % (name))
+ mod = importer.import_module(locs[0])
+ cls = getattr(mod, 'Distro')
+ return cls
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
new file mode 100644
index 00000000..3247d7ce
--- /dev/null
+++ b/cloudinit/distros/debian.py
@@ -0,0 +1,149 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(distros.Distro):
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+        # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+
+ def apply_locale(self, locale, out_fn=None):
+ if not out_fn:
+ out_fn = self._paths.join(False, '/etc/default/locale')
+ util.subp(['locale-gen', locale], capture=False)
+ util.subp(['update-locale', locale], capture=False)
+ contents = [
+ "# Created by cloud-init",
+ 'LANG="%s"' % (locale),
+ ]
+ util.write_file(out_fn, "\n".join(contents))
+
+ def install_packages(self, pkglist):
+ self.update_package_sources()
+ self.package_command('install', pkglist)
+
+ def _write_network(self, settings):
+ net_fn = self._paths.join(False, "/etc/network/interfaces")
+ util.write_file(net_fn, settings)
+
+ def set_hostname(self, hostname):
+ out_fn = self._paths.join(False, "/etc/hostname")
+ self._write_hostname(hostname, out_fn)
+ if out_fn == '/etc/hostname':
+ # Only do this if we are running in non-adjusted root mode
+ LOG.debug("Setting hostname to %s", hostname)
+ util.subp(['hostname', hostname])
+
+ def _write_hostname(self, hostname, out_fn):
+ lines = []
+ lines.append("# Created by cloud-init")
+ lines.append(str(hostname))
+ contents = "\n".join(lines)
+ util.write_file(out_fn, contents, 0644)
+
+ def update_hostname(self, hostname, prev_fn):
+ hostname_prev = self._read_hostname(prev_fn)
+ read_fn = self._paths.join(True, "/etc/hostname")
+ hostname_in_etc = self._read_hostname(read_fn)
+ update_files = []
+ if not hostname_prev or hostname_prev != hostname:
+ update_files.append(prev_fn)
+ if (not hostname_in_etc or
+ (hostname_in_etc == hostname_prev and
+ hostname_in_etc != hostname)):
+ write_fn = self._paths.join(False, "/etc/hostname")
+ update_files.append(write_fn)
+ for fn in update_files:
+ try:
+ self._write_hostname(hostname, fn)
+ except:
+ util.logexc(LOG, "Failed to write hostname %s to %s",
+ hostname, fn)
+ if (hostname_in_etc and hostname_prev and
+ hostname_in_etc != hostname_prev):
+ LOG.debug(("%s differs from /etc/hostname."
+ " Assuming user maintained hostname."), prev_fn)
+ if "/etc/hostname" in update_files:
+ # Only do this if we are running in non-adjusted root mode
+ LOG.debug("Setting hostname to %s", hostname)
+ util.subp(['hostname', hostname])
+
+ def _read_hostname(self, filename, default=None):
+ contents = util.load_file(filename, quiet=True)
+ for line in contents.splitlines():
+ c_pos = line.find("#")
+ # Handle inline comments
+ if c_pos != -1:
+ line = line[0:c_pos]
+ line_c = line.strip()
+ if line_c:
+ return line_c
+ return default
+
+ def _get_localhost_ip(self):
+ # Note: http://www.leonardoborda.com/blog/127-0-1-1-ubuntu-debian/
+ return "127.0.1.1"
+
+ def set_timezone(self, tz):
+ tz_file = os.path.join("/usr/share/zoneinfo", tz)
+ if not os.path.isfile(tz_file):
+ raise RuntimeError(("Invalid timezone %s,"
+ " no file found at %s") % (tz, tz_file))
+ tz_lines = [
+ "# Created by cloud-init",
+ str(tz),
+ ]
+ tz_contents = "\n".join(tz_lines)
+ tz_fn = self._paths.join(False, "/etc/timezone")
+ util.write_file(tz_fn, tz_contents)
+ util.copy(tz_file, self._paths.join(False, "/etc/localtime"))
+
+ def package_command(self, command, args=None):
+ e = os.environ.copy()
+ # See: http://tiny.cc/kg91fw
+ # Or: http://tiny.cc/mh91fw
+ e['DEBIAN_FRONTEND'] = 'noninteractive'
+ cmd = ['apt-get', '--option', 'Dpkg::Options::=--force-confold',
+ '--assume-yes', '--quiet', command]
+ if args:
+ cmd.extend(args)
+ # Allow the output of this to flow outwards (ie not be captured)
+ util.subp(cmd, env=e, capture=False)
+
+ def update_package_sources(self):
+ self._runner.run("update-sources", self.package_command,
+ ["update"], freq=PER_INSTANCE)
diff --git a/cloudinit/CloudConfig/cc_foo.py b/cloudinit/distros/fedora.py
index 35ec3fa7..c777845d 100644
--- a/cloudinit/CloudConfig/cc_foo.py
+++ b/cloudinit/distros/fedora.py
@@ -1,10 +1,12 @@
# vi: ts=4 expandtab
#
-# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -18,12 +20,12 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#import cloudinit
-#import cloudinit.util as util
-from cloudinit.CloudConfig import per_instance
+from cloudinit.distros import rhel
-frequency = per_instance
+from cloudinit import log as logging
+LOG = logging.getLogger(__name__)
-def handle(_name, _cfg, _cloud, _log, _args):
- print "hi"
+
+class Distro(rhel.Distro):
+ pass
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
new file mode 100644
index 00000000..7fa69f03
--- /dev/null
+++ b/cloudinit/distros/rhel.py
@@ -0,0 +1,337 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+NETWORK_FN_TPL = '/etc/sysconfig/network-scripts/ifcfg-%s'
+
+# See: http://tiny.cc/6r99fw
+# For what a lot of these files that are being written
+# are and the format of them
+
+# This library is used to parse/write
+# out the various sysconfig files edited
+#
+# It has to be slightly modified though
+# to ensure that all values are quoted
+# since these configs are usually sourced into
+# bash scripts...
+from configobj import ConfigObj
+
+# See: http://tiny.cc/oezbgw
+D_QUOTE_CHARS = {
+ "\"": "\\\"",
+ "(": "\\(",
+ ")": "\\)",
+ "$": '\$',
+ '`': '\`',
+}
+
+
+class Distro(distros.Distro):
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+        # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+
+ def install_packages(self, pkglist):
+ self.package_command('install', pkglist)
+
+ def _write_network(self, settings):
+ # TODO fix this... since this is the ubuntu format
+ entries = translate_network(settings)
+ LOG.debug("Translated ubuntu style network settings %s into %s",
+ settings, entries)
+ # Make the intermediate format as the rhel format...
+ for (dev, info) in entries.iteritems():
+ net_fn = NETWORK_FN_TPL % (dev)
+ net_ro_fn = self._paths.join(True, net_fn)
+ (prev_exist, net_cfg) = self._read_conf(net_ro_fn)
+ net_cfg['DEVICE'] = dev
+ boot_proto = info.get('bootproto')
+ if boot_proto:
+ net_cfg['BOOTPROTO'] = boot_proto
+ net_mask = info.get('netmask')
+ if net_mask:
+ net_cfg["NETMASK"] = net_mask
+ addr = info.get('address')
+ if addr:
+ net_cfg["IPADDR"] = addr
+ if info.get('auto'):
+ net_cfg['ONBOOT'] = 'yes'
+ else:
+ net_cfg['ONBOOT'] = 'no'
+ gtway = info.get('gateway')
+ if gtway:
+ net_cfg["GATEWAY"] = gtway
+ bcast = info.get('broadcast')
+ if bcast:
+ net_cfg["BROADCAST"] = bcast
+ mac_addr = info.get('hwaddress')
+ if mac_addr:
+ net_cfg["MACADDR"] = mac_addr
+ lines = net_cfg.write()
+ if not prev_exist:
+ lines.insert(0, '# Created by cloud-init')
+ w_contents = "\n".join(lines)
+ net_rw_fn = self._paths.join(False, net_fn)
+ util.write_file(net_rw_fn, w_contents, 0644)
+
+ def set_hostname(self, hostname):
+ out_fn = self._paths.join(False, '/etc/sysconfig/network')
+ self._write_hostname(hostname, out_fn)
+ if out_fn == '/etc/sysconfig/network':
+ # Only do this if we are running in non-adjusted root mode
+ LOG.debug("Setting hostname to %s", hostname)
+ util.subp(['hostname', hostname])
+
+ def apply_locale(self, locale, out_fn=None):
+ if not out_fn:
+ out_fn = self._paths.join(False, '/etc/sysconfig/i18n')
+ ro_fn = self._paths.join(True, '/etc/sysconfig/i18n')
+ (_exists, contents) = self._read_conf(ro_fn)
+ contents['LANG'] = locale
+ w_contents = "\n".join(contents.write())
+ util.write_file(out_fn, w_contents, 0644)
+
+ def _write_hostname(self, hostname, out_fn):
+ (_exists, contents) = self._read_conf(out_fn)
+ contents['HOSTNAME'] = hostname
+ w_contents = "\n".join(contents.write())
+ util.write_file(out_fn, w_contents, 0644)
+
+ def update_hostname(self, hostname, prev_file):
+ hostname_prev = self._read_hostname(prev_file)
+ read_fn = self._paths.join(True, "/etc/sysconfig/network")
+ hostname_in_sys = self._read_hostname(read_fn)
+ update_files = []
+ if not hostname_prev or hostname_prev != hostname:
+ update_files.append(prev_file)
+ if (not hostname_in_sys or
+ (hostname_in_sys == hostname_prev
+ and hostname_in_sys != hostname)):
+ write_fn = self._paths.join(False, "/etc/sysconfig/network")
+ update_files.append(write_fn)
+ for fn in update_files:
+ try:
+ self._write_hostname(hostname, fn)
+ except:
+ util.logexc(LOG, "Failed to write hostname %s to %s",
+ hostname, fn)
+ if (hostname_in_sys and hostname_prev and
+ hostname_in_sys != hostname_prev):
+ LOG.debug(("%s differs from /etc/sysconfig/network."
+ " Assuming user maintained hostname."), prev_file)
+ if "/etc/sysconfig/network" in update_files:
+ # Only do this if we are running in non-adjusted root mode
+ LOG.debug("Setting hostname to %s", hostname)
+ util.subp(['hostname', hostname])
+
+ def _read_hostname(self, filename, default=None):
+ (_exists, contents) = self._read_conf(filename)
+ if 'HOSTNAME' in contents:
+ return contents['HOSTNAME']
+ else:
+ return default
+
+ def _read_conf(self, fn):
+ exists = False
+ if os.path.isfile(fn):
+ contents = util.load_file(fn).splitlines()
+ exists = True
+ else:
+ contents = []
+ return (exists, QuotingConfigObj(contents))
+
+ def set_timezone(self, tz):
+ tz_file = os.path.join("/usr/share/zoneinfo", tz)
+ if not os.path.isfile(tz_file):
+ raise RuntimeError(("Invalid timezone %s,"
+ " no file found at %s") % (tz, tz_file))
+ # Adjust the sysconfig clock zone setting
+ read_fn = self._paths.join(True, "/etc/sysconfig/clock")
+ (_exists, contents) = self._read_conf(read_fn)
+ contents['ZONE'] = tz
+ tz_contents = "\n".join(contents.write())
+ write_fn = self._paths.join(False, "/etc/sysconfig/clock")
+ util.write_file(write_fn, tz_contents)
+ # This ensures that the correct tz will be used for the system
+ util.copy(tz_file, self._paths.join(False, "/etc/localtime"))
+
+ def package_command(self, command, args=None):
+ cmd = ['yum']
+ # If enabled, then yum will be tolerant of errors on the command line
+ # with regard to packages.
+ # For example: if you request to install foo, bar and baz and baz is
+ # installed; yum won't error out complaining that baz is already
+ # installed.
+ cmd.append("-t")
+ # Determines whether or not yum prompts for confirmation
+ # of critical actions. We don't want to prompt...
+ cmd.append("-y")
+ cmd.append(command)
+ if args:
+ cmd.extend(args)
+ # Allow the output of this to flow outwards (ie not be captured)
+ util.subp(cmd, capture=False)
+
+ def update_package_sources(self):
+ self._runner.run("update-sources", self.package_command,
+ ["update"], freq=PER_INSTANCE)
+
+
+# This class helps adjust the configobj
+# writing to ensure that when writing a k/v
+# on a line, that they are properly quoted
+# and have no spaces around the '=' sign.
+# - This is mainly due to the fact that
+# the sysconfig scripts are often sourced
+# directly into bash/shell scripts so ensure
+# that it works for those types of use cases.
+class QuotingConfigObj(ConfigObj):
+ def __init__(self, lines):
+ ConfigObj.__init__(self, lines,
+ interpolation=False,
+ write_empty_values=True)
+
+ def _quote_posix(self, text):
+ if not text:
+ return ''
+ for (k, v) in D_QUOTE_CHARS.iteritems():
+ text = text.replace(k, v)
+ return '"%s"' % (text)
+
+ def _quote_special(self, text):
+ if text.lower() in ['yes', 'no', 'true', 'false']:
+ return text
+ else:
+ return self._quote_posix(text)
+
+ def _write_line(self, indent_string, entry, this_entry, comment):
+ # Ensure it is formatted fine for
+ # how these sysconfig scripts are used
+ val = self._decode_element(self._quote(this_entry))
+ # Single quoted strings should
+ # always work.
+ if not val.startswith("'"):
+ # Perform any special quoting
+ val = self._quote_special(val)
+ key = self._decode_element(self._quote(entry, multiline=False))
+ cmnt = self._decode_element(comment)
+ return '%s%s%s%s%s' % (indent_string,
+ key,
+ "=",
+ val,
+ cmnt)
+
+
+# This is a util function to translate a ubuntu /etc/network/interfaces 'blob'
+# to a rhel equiv. that can then be written to /etc/sysconfig/network-scripts/
+# TODO remove when we have python-netcf active...
+def translate_network(settings):
+ # Get the standard cmd, args from the ubuntu format
+ entries = []
+ for line in settings.splitlines():
+ line = line.strip()
+ if not line or line.startswith("#"):
+ continue
+ split_up = line.split(None, 1)
+ if len(split_up) <= 1:
+ continue
+ entries.append(split_up)
+ # Figure out where each iface section is
+ ifaces = []
+ consume = {}
+ for (cmd, args) in entries:
+ if cmd == 'iface':
+ if consume:
+ ifaces.append(consume)
+ consume = {}
+ consume[cmd] = args
+ else:
+ consume[cmd] = args
+ # Check if anything left over to consume
+ absorb = False
+ for (cmd, args) in consume.iteritems():
+ if cmd == 'iface':
+ absorb = True
+ if absorb:
+ ifaces.append(consume)
+ # Now translate
+ real_ifaces = {}
+ for info in ifaces:
+ if 'iface' not in info:
+ continue
+ iface_details = info['iface'].split(None)
+ dev_name = None
+ if len(iface_details) >= 1:
+ dev = iface_details[0].strip().lower()
+ if dev:
+ dev_name = dev
+ if not dev_name:
+ continue
+ iface_info = {}
+ if len(iface_details) >= 3:
+ proto_type = iface_details[2].strip().lower()
+ # Seems like this can be 'loopback' which we don't
+ # really care about
+ if proto_type in ['dhcp', 'static']:
+ iface_info['bootproto'] = proto_type
+ # These can just be copied over
+ for k in ['netmask', 'address', 'gateway', 'broadcast']:
+ if k in info:
+ val = info[k].strip().lower()
+ if val:
+ iface_info[k] = val
+ # Is any mac address spoofing going on??
+ if 'hwaddress' in info:
+ hw_info = info['hwaddress'].lower().strip()
+ hw_split = hw_info.split(None, 1)
+ if len(hw_split) == 2 and hw_split[0].startswith('ether'):
+ hw_addr = hw_split[1]
+ if hw_addr:
+ iface_info['hwaddress'] = hw_addr
+ real_ifaces[dev_name] = iface_info
+ # Check for those that should be started on boot via 'auto'
+ for (cmd, args) in entries:
+ if cmd == 'auto':
+ # Seems like auto can be like 'auto eth0 eth0:1' so just get the
+ # first part out as the device name
+ args = args.split(None)
+ if not args:
+ continue
+ dev_name = args[0].strip().lower()
+ if dev_name in real_ifaces:
+ real_ifaces[dev_name]['auto'] = True
+ return real_ifaces
diff --git a/cloudinit/CloudConfig/cc_disable_ec2_metadata.py b/cloudinit/distros/ubuntu.py
index 6b31ea8e..77c2aff4 100644
--- a/cloudinit/CloudConfig/cc_disable_ec2_metadata.py
+++ b/cloudinit/distros/ubuntu.py
@@ -1,10 +1,12 @@
# vi: ts=4 expandtab
#
-# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -17,14 +19,13 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.util as util
-import subprocess
-from cloudinit.CloudConfig import per_always
-frequency = per_always
+from cloudinit.distros import debian
+from cloudinit import log as logging
-def handle(_name, cfg, _cloud, _log, _args):
- if util.get_cfg_option_bool(cfg, "disable_ec2_metadata", False):
- fwall = "route add -host 169.254.169.254 reject"
- subprocess.call(fwall.split(' '))
+LOG = logging.getLogger(__name__)
+
+
+class Distro(debian.Distro):
+ pass
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
new file mode 100644
index 00000000..dce2abef
--- /dev/null
+++ b/cloudinit/handlers/__init__.py
@@ -0,0 +1,222 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import abc
+import os
+
+from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)
+
+from cloudinit import importer
+from cloudinit import log as logging
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+# Used as the content type when a message is not multipart
+# and it doesn't contain its own content-type
+NOT_MULTIPART_TYPE = "text/x-not-multipart"
+
+# When none is assigned this gets used
+OCTET_TYPE = 'application/octet-stream'
+
+# Special content types that signal the start and end of processing
+CONTENT_END = "__end__"
+CONTENT_START = "__begin__"
+CONTENT_SIGNALS = [CONTENT_START, CONTENT_END]
+
+# Used when a part-handler type is encountered
+# to allow for registration of new types.
+PART_CONTENT_TYPES = ["text/part-handler"]
+PART_HANDLER_FN_TMPL = 'part-handler-%03d'
+
+# For parts without filenames
+PART_FN_TPL = 'part-%03d'
+
+# Different file beginnings to their content type
+INCLUSION_TYPES_MAP = {
+ '#include': 'text/x-include-url',
+ '#include-once': 'text/x-include-once-url',
+ '#!': 'text/x-shellscript',
+ '#cloud-config': 'text/cloud-config',
+ '#upstart-job': 'text/upstart-job',
+ '#part-handler': 'text/part-handler',
+ '#cloud-boothook': 'text/cloud-boothook',
+ '#cloud-config-archive': 'text/cloud-config-archive',
+}
+
+# Sorted longest first
+INCLUSION_SRCH = sorted(list(INCLUSION_TYPES_MAP.keys()),
+ key=(lambda e: 0 - len(e)))
+
+
+class Handler(object):
+
+ __metaclass__ = abc.ABCMeta
+
+ def __init__(self, frequency, version=2):
+ self.handler_version = version
+ self.frequency = frequency
+
+ def __repr__(self):
+ return "%s: [%s]" % (util.obj_name(self), self.list_types())
+
+ @abc.abstractmethod
+ def list_types(self):
+ raise NotImplementedError()
+
+ def handle_part(self, data, ctype, filename, payload, frequency):
+ return self._handle_part(data, ctype, filename, payload, frequency)
+
+ @abc.abstractmethod
+ def _handle_part(self, data, ctype, filename, payload, frequency):
+ raise NotImplementedError()
+
+
+def run_part(mod, data, ctype, filename, payload, frequency):
+ mod_freq = mod.frequency
+ if not (mod_freq == PER_ALWAYS or
+ (frequency == PER_INSTANCE and mod_freq == PER_INSTANCE)):
+ return
+ mod_ver = mod.handler_version
+    # Sanity checks on version (should be an int convertible)
+ try:
+ mod_ver = int(mod_ver)
+ except:
+ mod_ver = 1
+ try:
+ LOG.debug("Calling handler %s (%s, %s, %s) with frequency %s",
+ mod, ctype, filename, mod_ver, frequency)
+ if mod_ver >= 2:
+ # Treat as v. 2 which does get a frequency
+ mod.handle_part(data, ctype, filename, payload, frequency)
+ else:
+ # Treat as v. 1 which gets no frequency
+ mod.handle_part(data, ctype, filename, payload)
+ except:
+ util.logexc(LOG, ("Failed calling handler %s (%s, %s, %s)"
+ " with frequency %s"),
+ mod, ctype, filename,
+ mod_ver, frequency)
+
+
+def call_begin(mod, data, frequency):
+ run_part(mod, data, CONTENT_START, None, None, frequency)
+
+
+def call_end(mod, data, frequency):
+ run_part(mod, data, CONTENT_END, None, None, frequency)
+
+
+def walker_handle_handler(pdata, _ctype, _filename, payload):
+ curcount = pdata['handlercount']
+ modname = PART_HANDLER_FN_TMPL % (curcount)
+ frequency = pdata['frequency']
+ modfname = os.path.join(pdata['handlerdir'], "%s" % (modname))
+ if not modfname.endswith(".py"):
+ modfname = "%s.py" % (modfname)
+ # TODO: Check if path exists??
+ util.write_file(modfname, payload, 0600)
+ handlers = pdata['handlers']
+ try:
+ mod = fixup_handler(importer.import_module(modname))
+ call_begin(mod, pdata['data'], frequency)
+ # Only register and increment
+        # after the above have worked (so we don't register
+        # it if the import or initial call fails)
+ handlers.register(mod)
+ pdata['handlercount'] = curcount + 1
+ except:
+ util.logexc(LOG, ("Failed at registering python file: %s"
+ " (part handler %s)"), modfname, curcount)
+
+
+def _extract_first_or_bytes(blob, size):
+    # Extract the first line up to X bytes or X bytes from more than the
+ # first line if the first line does not contain enough bytes
+ first_line = blob.split("\n", 1)[0]
+ if len(first_line) >= size:
+ start = first_line[:size]
+ else:
+ start = blob[0:size]
+ return start
+
+
+def walker_callback(pdata, ctype, filename, payload):
+ if ctype in PART_CONTENT_TYPES:
+ walker_handle_handler(pdata, ctype, filename, payload)
+ return
+ handlers = pdata['handlers']
+ if ctype not in pdata['handlers'] and payload:
+ # Extract the first line or 24 bytes for displaying in the log
+ start = _extract_first_or_bytes(payload, 24)
+ details = "'%s...'" % (start.encode("string-escape"))
+ if ctype == NOT_MULTIPART_TYPE:
+ LOG.warning("Unhandled non-multipart (%s) userdata: %s",
+ ctype, details)
+ else:
+ LOG.warning("Unhandled unknown content-type (%s) userdata: %s",
+ ctype, details)
+ else:
+ run_part(handlers[ctype], pdata['data'], ctype, filename,
+ payload, pdata['frequency'])
+
+
+# Callback is a function that will be called with
+# (data, content_type, filename, payload)
+def walk(msg, callback, data):
+ partnum = 0
+ for part in msg.walk():
+ # multipart/* are just containers
+ if part.get_content_maintype() == 'multipart':
+ continue
+
+ ctype = part.get_content_type()
+ if ctype is None:
+ ctype = OCTET_TYPE
+
+ filename = part.get_filename()
+ if not filename:
+ filename = PART_FN_TPL % (partnum)
+
+ callback(data, ctype, filename, part.get_payload(decode=True))
+ partnum = partnum + 1
+
+
+def fixup_handler(mod, def_freq=PER_INSTANCE):
+ if not hasattr(mod, "handler_version"):
+ setattr(mod, "handler_version", 1)
+ if not hasattr(mod, 'frequency'):
+ setattr(mod, 'frequency', def_freq)
+ else:
+ freq = mod.frequency
+ if freq and freq not in FREQUENCIES:
+ LOG.warn("Handler %s has an unknown frequency %s", mod, freq)
+ return mod
+
+
+def type_from_starts_with(payload, default=None):
+ payload_lc = payload.lower()
+ payload_lc = payload_lc.lstrip()
+ for text in INCLUSION_SRCH:
+ if payload_lc.startswith(text):
+ return INCLUSION_TYPES_MAP[text]
+ return default
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
new file mode 100644
index 00000000..456b8020
--- /dev/null
+++ b/cloudinit/handlers/boot_hook.py
@@ -0,0 +1,73 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import handlers
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.settings import (PER_ALWAYS)
+
+LOG = logging.getLogger(__name__)
+
+
+class BootHookPartHandler(handlers.Handler):
+ def __init__(self, paths, datasource, **_kwargs):
+ handlers.Handler.__init__(self, PER_ALWAYS)
+ self.boothook_dir = paths.get_ipath("boothooks")
+ self.instance_id = None
+ if datasource:
+ self.instance_id = datasource.get_instance_id()
+
+ def list_types(self):
+ return [
+ handlers.type_from_starts_with("#cloud-boothook"),
+ ]
+
+ def _write_part(self, payload, filename):
+ filename = util.clean_filename(filename)
+ payload = util.dos2unix(payload)
+ prefix = "#cloud-boothook"
+ start = 0
+ if payload.startswith(prefix):
+ start = len(prefix) + 1
+ filepath = os.path.join(self.boothook_dir, filename)
+ contents = payload[start:]
+ util.write_file(filepath, contents, 0700)
+ return filepath
+
+ def _handle_part(self, _data, ctype, filename, payload, _frequency):
+ if ctype in handlers.CONTENT_SIGNALS:
+ return
+
+ filepath = self._write_part(payload, filename)
+ try:
+ env = os.environ.copy()
+ if self.instance_id is not None:
+ env['INSTANCE_ID'] = str(self.instance_id)
+ util.subp([filepath], env=env)
+ except util.ProcessExecutionError:
+ util.logexc(LOG, "Boothooks script %s execution error", filepath)
+ except Exception:
+ util.logexc(LOG, ("Boothooks unknown "
+ "error when running %s"), filepath)
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
new file mode 100644
index 00000000..f6d95244
--- /dev/null
+++ b/cloudinit/handlers/cloud_config.py
@@ -0,0 +1,62 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import handlers
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.settings import (PER_ALWAYS)
+
+LOG = logging.getLogger(__name__)
+
+
+class CloudConfigPartHandler(handlers.Handler):
+ def __init__(self, paths, **_kwargs):
+ handlers.Handler.__init__(self, PER_ALWAYS)
+ self.cloud_buf = []
+ self.cloud_fn = paths.get_ipath("cloud_config")
+
+ def list_types(self):
+ return [
+ handlers.type_from_starts_with("#cloud-config"),
+ ]
+
+ def _write_cloud_config(self, buf):
+ if not self.cloud_fn:
+ return
+ lines = [str(b) for b in buf]
+ payload = "\n".join(lines)
+ util.write_file(self.cloud_fn, payload, 0600)
+
+ def _handle_part(self, _data, ctype, filename, payload, _frequency):
+ if ctype == handlers.CONTENT_START:
+ self.cloud_buf = []
+ return
+ if ctype == handlers.CONTENT_END:
+ self._write_cloud_config(self.cloud_buf)
+ self.cloud_buf = []
+ return
+
+ filename = util.clean_filename(filename)
+ if not filename:
+ filename = '??'
+ self.cloud_buf.extend(["#%s" % (filename), str(payload)])
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
new file mode 100644
index 00000000..a9d8e544
--- /dev/null
+++ b/cloudinit/handlers/shell_script.py
@@ -0,0 +1,52 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+from cloudinit import handlers
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.settings import (PER_ALWAYS)
+
+LOG = logging.getLogger(__name__)
+
+
+class ShellScriptPartHandler(handlers.Handler):
+ def __init__(self, paths, **_kwargs):
+ handlers.Handler.__init__(self, PER_ALWAYS)
+ self.script_dir = paths.get_ipath_cur('scripts')
+
+ def list_types(self):
+ return [
+ handlers.type_from_starts_with("#!"),
+ ]
+
+ def _handle_part(self, _data, ctype, filename, payload, _frequency):
+ if ctype in handlers.CONTENT_SIGNALS:
+ # TODO: maybe delete existing things here
+ return
+
+ filename = util.clean_filename(filename)
+ payload = util.dos2unix(payload)
+ path = os.path.join(self.script_dir, filename)
+ util.write_file(path, payload, 0700)
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
new file mode 100644
index 00000000..99e0afde
--- /dev/null
+++ b/cloudinit/handlers/upstart_job.py
@@ -0,0 +1,66 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+import os
+
+from cloudinit import handlers
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.settings import (PER_INSTANCE)
+
+LOG = logging.getLogger(__name__)
+
+
+class UpstartJobPartHandler(handlers.Handler):
+ def __init__(self, paths, **_kwargs):
+ handlers.Handler.__init__(self, PER_INSTANCE)
+ self.upstart_dir = paths.upstart_conf_d
+
+ def list_types(self):
+ return [
+ handlers.type_from_starts_with("#upstart-job"),
+ ]
+
+ def _handle_part(self, _data, ctype, filename, payload, frequency):
+ if ctype in handlers.CONTENT_SIGNALS:
+ return
+
+ # See: https://bugs.launchpad.net/bugs/819507
+ if frequency != PER_INSTANCE:
+ return
+
+ if not self.upstart_dir:
+ return
+
+ filename = util.clean_filename(filename)
+ (_name, ext) = os.path.splitext(filename)
+ if not ext:
+ ext = ''
+ ext = ext.lower()
+ if ext != ".conf":
+ filename = filename + ".conf"
+
+ payload = util.dos2unix(payload)
+ path = os.path.join(self.upstart_dir, filename)
+ util.write_file(path, payload, 0644)
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
new file mode 100644
index 00000000..15036a50
--- /dev/null
+++ b/cloudinit/helpers.py
@@ -0,0 +1,452 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from time import time
+
+import contextlib
+import io
+import os
+
+from ConfigParser import (NoSectionError, NoOptionError, RawConfigParser)
+
+from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
+ CFG_ENV_NAME)
+
+from cloudinit import log as logging
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class LockFailure(Exception):
+ pass
+
+
+class DummyLock(object):
+ pass
+
+
+class DummySemaphores(object):
+ def __init__(self):
+ pass
+
+ @contextlib.contextmanager
+ def lock(self, _name, _freq, _clear_on_fail=False):
+ yield DummyLock()
+
+ def has_run(self, _name, _freq):
+ return False
+
+ def clear(self, _name, _freq):
+ return True
+
+ def clear_all(self):
+ pass
+
+
+class FileLock(object):
+ def __init__(self, fn):
+ self.fn = fn
+
+
+class FileSemaphores(object):
+ def __init__(self, sem_path):
+ self.sem_path = sem_path
+
+ @contextlib.contextmanager
+ def lock(self, name, freq, clear_on_fail=False):
+ try:
+ yield self._acquire(name, freq)
+ except:
+ if clear_on_fail:
+ self.clear(name, freq)
+ raise
+
+ def clear(self, name, freq):
+ sem_file = self._get_path(name, freq)
+ try:
+ util.del_file(sem_file)
+ except (IOError, OSError):
+ util.logexc(LOG, "Failed deleting semaphore %s", sem_file)
+ return False
+ return True
+
+ def clear_all(self):
+ try:
+ util.del_dir(self.sem_path)
+ except (IOError, OSError):
+ util.logexc(LOG, "Failed deleting semaphore directory %s",
+ self.sem_path)
+
+ def _acquire(self, name, freq):
+        # Check again if it's already been acquired
+ if self.has_run(name, freq):
+ return None
+ # This is a race condition since nothing atomic is happening
+ # here, but this should be ok due to the nature of when
+ # and where cloud-init runs... (file writing is not a lock...)
+ sem_file = self._get_path(name, freq)
+ contents = "%s: %s\n" % (os.getpid(), time())
+ try:
+ util.write_file(sem_file, contents)
+ except (IOError, OSError):
+ util.logexc(LOG, "Failed writing semaphore file %s", sem_file)
+ return None
+ return FileLock(sem_file)
+
+ def has_run(self, name, freq):
+ if not freq or freq == PER_ALWAYS:
+ return False
+ sem_file = self._get_path(name, freq)
+ # This isn't really a good atomic check
+ # but it suffices for where and when cloudinit runs
+ if os.path.exists(sem_file):
+ return True
+ return False
+
+ def _get_path(self, name, freq):
+ sem_path = self.sem_path
+ if not freq or freq == PER_INSTANCE:
+ return os.path.join(sem_path, name)
+ else:
+ return os.path.join(sem_path, "%s.%s" % (name, freq))
+
+
+class Runners(object):
+ def __init__(self, paths):
+ self.paths = paths
+ self.sems = {}
+
+ def _get_sem(self, freq):
+ if freq == PER_ALWAYS or not freq:
+ return None
+ sem_path = None
+ if freq == PER_INSTANCE:
+ # This may not exist,
+            # so that's why we still check for None
+ # below if say the paths object
+ # doesn't have a datasource that can
+ # provide this instance path...
+ sem_path = self.paths.get_ipath("sem")
+ elif freq == PER_ONCE:
+ sem_path = self.paths.get_cpath("sem")
+ if not sem_path:
+ return None
+ if sem_path not in self.sems:
+ self.sems[sem_path] = FileSemaphores(sem_path)
+ return self.sems[sem_path]
+
+ def run(self, name, functor, args, freq=None, clear_on_fail=False):
+ sem = self._get_sem(freq)
+ if not sem:
+ sem = DummySemaphores()
+ if not args:
+ args = []
+ if sem.has_run(name, freq):
+ LOG.debug("%s already ran (freq=%s)", name, freq)
+ return (False, None)
+ with sem.lock(name, freq, clear_on_fail) as lk:
+ if not lk:
+ raise LockFailure("Failed to acquire lock for %s" % name)
+ else:
+ LOG.debug("Running %s using lock (%s)", name, lk)
+ if isinstance(args, (dict)):
+ results = functor(**args)
+ else:
+ results = functor(*args)
+ return (True, results)
+
+
+class ConfigMerger(object):
+ def __init__(self, paths=None, datasource=None,
+ additional_fns=None, base_cfg=None):
+ self._paths = paths
+ self._ds = datasource
+ self._fns = additional_fns
+ self._base_cfg = base_cfg
+ # Created on first use
+ self._cfg = None
+
+ def _get_datasource_configs(self):
+ d_cfgs = []
+ if self._ds:
+ try:
+ ds_cfg = self._ds.get_config_obj()
+ if ds_cfg and isinstance(ds_cfg, (dict)):
+ d_cfgs.append(ds_cfg)
+ except:
+ util.logexc(LOG, ("Failed loading of datasource"
+ " config object from %s"), self._ds)
+ return d_cfgs
+
+ def _get_env_configs(self):
+ e_cfgs = []
+ if CFG_ENV_NAME in os.environ:
+ e_fn = os.environ[CFG_ENV_NAME]
+ try:
+ e_cfgs.append(util.read_conf(e_fn))
+ except:
+ util.logexc(LOG, ('Failed loading of env. config'
+ ' from %s'), e_fn)
+ return e_cfgs
+
+ def _get_instance_configs(self):
+ i_cfgs = []
+ # If cloud-config was written, pick it up as
+ # a configuration file to use when running...
+ if not self._paths:
+ return i_cfgs
+ cc_fn = self._paths.get_ipath_cur('cloud_config')
+ if cc_fn and os.path.isfile(cc_fn):
+ try:
+ i_cfgs.append(util.read_conf(cc_fn))
+ except:
+ util.logexc(LOG, ('Failed loading of cloud-config'
+ ' from %s'), cc_fn)
+ return i_cfgs
+
+ def _read_cfg(self):
+ # Input config files override
+ # env config files which
+ # override instance configs
+ # which override datasource
+ # configs which override
+ # base configuration
+ cfgs = []
+ if self._fns:
+ for c_fn in self._fns:
+ try:
+ cfgs.append(util.read_conf(c_fn))
+ except:
+ util.logexc(LOG, ("Failed loading of configuration"
+ " from %s"), c_fn)
+
+ cfgs.extend(self._get_env_configs())
+ cfgs.extend(self._get_instance_configs())
+ cfgs.extend(self._get_datasource_configs())
+ if self._base_cfg:
+ cfgs.append(self._base_cfg)
+ return util.mergemanydict(cfgs)
+
+ @property
+ def cfg(self):
+ # None check to avoid empty case causing re-reading
+ if self._cfg is None:
+ self._cfg = self._read_cfg()
+ return self._cfg
+
+
+class ContentHandlers(object):
+
+ def __init__(self):
+ self.registered = {}
+
+ def __contains__(self, item):
+ return self.is_registered(item)
+
+ def __getitem__(self, key):
+ return self._get_handler(key)
+
+ def is_registered(self, content_type):
+ return content_type in self.registered
+
+ def register(self, mod):
+ types = set()
+ for t in mod.list_types():
+ self.registered[t] = mod
+ types.add(t)
+ return types
+
+ def _get_handler(self, content_type):
+ return self.registered[content_type]
+
+ def items(self):
+ return self.registered.items()
+
+ def iteritems(self):
+ return self.registered.iteritems()
+
+ def register_defaults(self, defs):
+ registered = set()
+ for mod in defs:
+ for t in mod.list_types():
+ if not self.is_registered(t):
+ self.registered[t] = mod
+ registered.add(t)
+ return registered
+
+
+class Paths(object):
+ def __init__(self, path_cfgs, ds=None):
+ self.cfgs = path_cfgs
+ # Populate all the initial paths
+ self.cloud_dir = self.join(False,
+ path_cfgs.get('cloud_dir',
+ '/var/lib/cloud'))
+ self.instance_link = os.path.join(self.cloud_dir, 'instance')
+ self.boot_finished = os.path.join(self.instance_link, "boot-finished")
+ self.upstart_conf_d = path_cfgs.get('upstart_dir')
+ if self.upstart_conf_d:
+ self.upstart_conf_d = self.join(False, self.upstart_conf_d)
+ self.seed_dir = os.path.join(self.cloud_dir, 'seed')
+ # This one isn't joined, since it should just be read-only
+ template_dir = path_cfgs.get('templates_dir', '/etc/cloud/templates/')
+ self.template_tpl = os.path.join(template_dir, '%s.tmpl')
+ self.lookups = {
+ "handlers": "handlers",
+ "scripts": "scripts",
+ "sem": "sem",
+ "boothooks": "boothooks",
+ "userdata_raw": "user-data.txt",
+ "userdata": "user-data.txt.i",
+ "obj_pkl": "obj.pkl",
+ "cloud_config": "cloud-config.txt",
+ "data": "data",
+ }
+ # Set when a datasource becomes active
+ self.datasource = ds
+
+ # joins the paths but also appends a read
+ # or write root if available
+ def join(self, read_only, *paths):
+ if read_only:
+ root = self.cfgs.get('read_root')
+ else:
+ root = self.cfgs.get('write_root')
+ if not paths:
+ return root
+ if len(paths) > 1:
+ joined = os.path.join(*paths)
+ else:
+ joined = paths[0]
+ if root:
+ pre_joined = joined
+ # Need to remove any starting '/' since this
+ # will confuse os.path.join
+ joined = joined.lstrip("/")
+ joined = os.path.join(root, joined)
+ LOG.debug("Translated %s to adjusted path %s (read-only=%s)",
+ pre_joined, joined, read_only)
+ return joined
+
+ # get_ipath_cur: get the current instance path for an item
+ def get_ipath_cur(self, name=None):
+ ipath = self.instance_link
+ add_on = self.lookups.get(name)
+ if add_on:
+ ipath = os.path.join(ipath, add_on)
+ return ipath
+
+ # get_cpath : get the "clouddir" (/var/lib/cloud/<name>)
+ # for a name in dirmap
+ def get_cpath(self, name=None):
+ cpath = self.cloud_dir
+ add_on = self.lookups.get(name)
+ if add_on:
+ cpath = os.path.join(cpath, add_on)
+ return cpath
+
+ # _get_ipath : get the instance path for a name in pathmap
+ # (/var/lib/cloud/instances/<instance>/<name>)
+ def _get_ipath(self, name=None):
+ if not self.datasource:
+ return None
+ iid = self.datasource.get_instance_id()
+ if iid is None:
+ return None
+ ipath = os.path.join(self.cloud_dir, 'instances', str(iid))
+ add_on = self.lookups.get(name)
+ if add_on:
+ ipath = os.path.join(ipath, add_on)
+ return ipath
+
+ # get_ipath : get the instance path for a name in pathmap
+ # (/var/lib/cloud/instances/<instance>/<name>)
+ # returns None + warns if no active datasource....
+ def get_ipath(self, name=None):
+ ipath = self._get_ipath(name)
+ if not ipath:
+ LOG.warn(("No per instance data available, "
+ "is there an datasource/iid set?"))
+ return None
+ else:
+ return ipath
+
+
+# This config parser will not throw when sections don't exist
+# and you are setting values on those sections which is useful
+# when writing to new options that may not have corresponding
+# sections. Also it can default other values when doing gets
+# so that if those sections/options do not exist you will
+# get a default instead of an error. This is also useful for
+# avoiding the need to catch exceptions that you typically
+# don't care about...
+
+class DefaultingConfigParser(RawConfigParser):
+ DEF_INT = 0
+ DEF_FLOAT = 0.0
+ DEF_BOOLEAN = False
+ DEF_BASE = None
+
+ def get(self, section, option):
+ value = self.DEF_BASE
+ try:
+ value = RawConfigParser.get(self, section, option)
+ except NoSectionError:
+ pass
+ except NoOptionError:
+ pass
+ return value
+
+ def set(self, section, option, value=None):
+ if not self.has_section(section) and section.lower() != 'default':
+ self.add_section(section)
+ RawConfigParser.set(self, section, option, value)
+
+ def remove_option(self, section, option):
+ if self.has_option(section, option):
+ RawConfigParser.remove_option(self, section, option)
+
+ def getboolean(self, section, option):
+ if not self.has_option(section, option):
+ return self.DEF_BOOLEAN
+ return RawConfigParser.getboolean(self, section, option)
+
+ def getfloat(self, section, option):
+ if not self.has_option(section, option):
+ return self.DEF_FLOAT
+ return RawConfigParser.getfloat(self, section, option)
+
+ def getint(self, section, option):
+ if not self.has_option(section, option):
+ return self.DEF_INT
+ return RawConfigParser.getint(self, section, option)
+
+ def stringify(self, header=None):
+ contents = ''
+ with io.BytesIO() as outputstream:
+ self.write(outputstream)
+ outputstream.flush()
+ contents = outputstream.getvalue()
+ if header:
+ contents = "\n".join([header, contents])
+ return contents
diff --git a/cloudinit/importer.py b/cloudinit/importer.py
new file mode 100644
index 00000000..71cf2726
--- /dev/null
+++ b/cloudinit/importer.py
@@ -0,0 +1,65 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import sys
+
+from cloudinit import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def import_module(module_name):
+ __import__(module_name)
+ return sys.modules[module_name]
+
+
+def find_module(base_name, search_paths, required_attrs=None):
+ found_places = []
+ if not required_attrs:
+ required_attrs = []
+ real_paths = []
+ for path in search_paths:
+ real_path = []
+ if path:
+ real_path.extend(path.split("."))
+ real_path.append(base_name)
+ full_path = '.'.join(real_path)
+ real_paths.append(full_path)
+ LOG.debug("Looking for modules %s that have attributes %s",
+ real_paths, required_attrs)
+ for full_path in real_paths:
+ mod = None
+ try:
+ mod = import_module(full_path)
+ except ImportError:
+ pass
+ if not mod:
+ continue
+ found_attrs = 0
+ for attr in required_attrs:
+ if hasattr(mod, attr):
+ found_attrs += 1
+ if found_attrs == len(required_attrs):
+ found_places.append(full_path)
+ LOG.debug("Found %s with attributes %s in %s", base_name,
+ required_attrs, found_places)
+ return found_places
diff --git a/cloudinit/log.py b/cloudinit/log.py
new file mode 100644
index 00000000..fc1428a2
--- /dev/null
+++ b/cloudinit/log.py
@@ -0,0 +1,133 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+import logging.handlers
+import logging.config
+
+import os
+import sys
+
+from StringIO import StringIO
+
+# Logging levels for easy access
+# (aliases so users of this module need not also import 'logging')
+CRITICAL = logging.CRITICAL
+FATAL = logging.FATAL
+ERROR = logging.ERROR
+WARNING = logging.WARNING
+WARN = logging.WARN
+INFO = logging.INFO
+DEBUG = logging.DEBUG
+NOTSET = logging.NOTSET
+
+# Default basic format
+DEF_CON_FORMAT = '%(asctime)s - %(filename)s[%(levelname)s]: %(message)s'
+
+
+def setupBasicLogging():
+ # Fallback logging setup: route all DEBUG-and-above records on the
+ # root logger to stderr using the default console format.
+ root = logging.getLogger()
+ console = logging.StreamHandler(sys.stderr)
+ console.setFormatter(logging.Formatter(DEF_CON_FORMAT))
+ console.setLevel(DEBUG)
+ root.addHandler(console)
+ root.setLevel(DEBUG)
+
+
+def setupLogging(cfg=None):
+ # Configure logging from the cloud-init configuration dict. Accepts
+ # either the legacy 'logcfg' string key or a list under 'log_cfgs';
+ # each candidate is fed to logging.config.fileConfig until one
+ # loads successfully, otherwise basic stderr logging is installed.
+ # See if the config provides any logging conf...
+ if not cfg:
+ cfg = {}
+
+ log_cfgs = []
+ log_cfg = cfg.get('logcfg')
+ # NOTE(review): str is a subclass of basestring (py2), so the
+ # (str, basestring) tuple is redundant; basestring alone suffices.
+ if log_cfg and isinstance(log_cfg, (str, basestring)):
+ # If there is a 'logcfg' entry in the config,
+ # respect it, it is the old keyname
+ log_cfgs.append(str(log_cfg))
+ elif "log_cfgs" in cfg and isinstance(cfg['log_cfgs'], (set, list)):
+ for a_cfg in cfg['log_cfgs']:
+ if isinstance(a_cfg, (list, set, dict)):
+ # Structured entry: join its pieces into one config text.
+ cfg_str = [str(c) for c in a_cfg]
+ log_cfgs.append('\n'.join(cfg_str))
+ else:
+ log_cfgs.append(str(a_cfg))
+
+ # See if any of them actually load...
+ am_tried = 0
+ am_worked = 0
+ for i, log_cfg in enumerate(log_cfgs):
+ try:
+ am_tried += 1
+ # Assume its just a string if not a filename
+ if log_cfg.startswith("/") and os.path.isfile(log_cfg):
+ pass
+ else:
+ # fileConfig also accepts a file-like object.
+ log_cfg = StringIO(log_cfg)
+ # Attempt to load its config
+ logging.config.fileConfig(log_cfg)
+ am_worked += 1
+ except Exception as e:
+ sys.stderr.write(("WARN: Setup of logging config %s"
+ " failed due to: %s\n") % (i + 1, e))
+
+ # If it didn't work, at least setup a basic logger (if desired)
+ basic_enabled = cfg.get('log_basic', True)
+ if not am_worked:
+ sys.stderr.write(("WARN: no logging configured!"
+ " (tried %s configs)\n") % (am_tried))
+ if basic_enabled:
+ sys.stderr.write("Setting up basic logging...\n")
+ setupBasicLogging()
+
+
+def getLogger(name='cloudinit'):
+ # Thin wrapper so callers get a logger under the 'cloudinit'
+ # namespace by default.
+ return logging.getLogger(name)
+
+
+# Fixes this annoyance...
+# No handlers could be found for logger XXX annoying output...
+try:
+ # logging.NullHandler only exists in python >= 2.7; provide a
+ # do-nothing fallback for older runtimes.
+ from logging import NullHandler
+except ImportError:
+ class NullHandler(logging.Handler):
+ def emit(self, record):
+ pass
+
+
+def _resetLogger(log):
+ # Flush, close and detach every handler on 'log', then leave it
+ # with only a NullHandler so stray records are silently discarded.
+ if not log:
+ return
+ handlers = list(log.handlers)
+ for h in handlers:
+ h.flush()
+ h.close()
+ log.removeHandler(h)
+ log.setLevel(NOTSET)
+ log.addHandler(NullHandler())
+
+
+def resetLogging():
+ # Reset both the root logger and the 'cloudinit' logger.
+ _resetLogger(logging.getLogger())
+ _resetLogger(getLogger())
+
+
+# Performed at import time so the module starts from a clean slate.
+resetLogging()
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index aac4af04..feba5a62 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -1,11 +1,12 @@
-#!/usr/bin/python
# vi: ts=4 expandtab
#
# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -21,6 +22,8 @@
import cloudinit.util as util
+from prettytable import PrettyTable
+
def netdev_info(empty=""):
fields = ("hwaddr", "addr", "bcast", "mask")
@@ -66,51 +69,99 @@ def netdev_info(empty=""):
if dev[field] == "":
dev[field] = empty
- return(devs)
+ return devs
def route_info():
(route_out, _err) = util.subp(["route", "-n"])
routes = []
- for line in str(route_out).splitlines()[1:]:
+ # Skip the first header line emitted by 'route -n'.
+ entries = route_out.splitlines()[1:]
+ for line in entries:
if not line:
continue
toks = line.split()
- if toks[0] == "Kernel" or toks[0] == "Destination":
+ # Guard against short/garbled rows as well as the header text.
+ if len(toks) < 8 or toks[0] == "Kernel" or toks[0] == "Destination":
continue
- routes.append(toks)
- return(routes)
+ # Name the positional 'route -n' columns so callers can use keys
+ # instead of magic indexes.
+ entry = {
+ 'destination': toks[0],
+ 'gateway': toks[1],
+ 'genmask': toks[2],
+ 'flags': toks[3],
+ 'metric': toks[4],
+ 'ref': toks[5],
+ 'use': toks[6],
+ 'iface': toks[7],
+ }
+ routes.append(entry)
+ return routes
def getgateway():
- for r in route_info():
- if r[3].find("G") >= 0:
- return("%s[%s]" % (r[1], r[7]))
- return(None)
+ routes = []
+ try:
+ routes = route_info()
+ # NOTE(review): bare 'except:' hides every failure here (even
+ # SystemExit); deliberate best-effort, but worth narrowing.
+ except:
+ pass
+ for r in routes:
+ # A 'G' in the flags column marks a gateway route.
+ if r['flags'].find("G") >= 0:
+ return "%s[%s]" % (r['gateway'], r['iface'])
+ return None
-def debug_info(pre="ci-info: "):
+def netdev_pformat():
+ # Render the network device table (via prettytable) as a string;
+ # on probe failure a centered banner line is returned instead.
lines = []
try:
netdev = netdev_info(empty=".")
except Exception:
- lines.append("netdev_info failed!")
- netdev = {}
- for (dev, d) in netdev.iteritems():
- lines.append("%s%-6s: %i %-15s %-15s %s" %
- (pre, dev, d["up"], d["addr"], d["mask"], d["hwaddr"]))
+ lines.append(util.center("Net device info failed", '!', 80))
+ netdev = None
+ if netdev is not None:
+ fields = ['Device', 'Up', 'Address', 'Mask', 'Hw-Address']
+ tbl = PrettyTable(fields)
+ for (dev, d) in netdev.iteritems():
+ tbl.add_row([dev, d["up"], d["addr"], d["mask"], d["hwaddr"]])
+ netdev_s = tbl.get_string()
+ # Center the header over the widest row of the rendered table.
+ max_len = len(max(netdev_s.splitlines(), key=len))
+ header = util.center("Net device info", "+", max_len)
+ lines.extend([header, netdev_s])
+ return "\n".join(lines)
+
+
+def route_pformat():
+ # Render the routing table the same way as netdev_pformat().
+ lines = []
try:
routes = route_info()
except Exception:
- lines.append("route_info failed")
- routes = []
- n = 0
- for r in routes:
- lines.append("%sroute-%d: %-15s %-15s %-15s %-6s %s" %
- (pre, n, r[0], r[1], r[2], r[7], r[3]))
- n = n + 1
- return('\n'.join(lines))
+ lines.append(util.center('Route info failed', '!', 80))
+ routes = None
+ if routes is not None:
+ fields = ['Route', 'Destination', 'Gateway',
+ 'Genmask', 'Interface', 'Flags']
+ tbl = PrettyTable(fields)
+ for (n, r) in enumerate(routes):
+ route_id = str(n)
+ tbl.add_row([route_id, r['destination'],
+ r['gateway'], r['genmask'],
+ r['iface'], r['flags']])
+ route_s = tbl.get_string()
+ max_len = len(max(route_s.splitlines(), key=len))
+ header = util.center("Route info", "+", max_len)
+ lines.extend([header, route_s])
+ return "\n".join(lines)
-if __name__ == '__main__':
- print debug_info()
+def debug_info(prefix='ci-info: '):
+ # Combined netdev + route report, each line tagged with 'prefix'
+ # (pass prefix='' to disable the tagging).
+ lines = []
+ netdev_lines = netdev_pformat().splitlines()
+ if prefix:
+ for line in netdev_lines:
+ lines.append("%s%s" % (prefix, line))
+ else:
+ lines.extend(netdev_lines)
+ route_lines = route_pformat().splitlines()
+ if prefix:
+ for line in route_lines:
+ lines.append("%s%s" % (prefix, line))
+ else:
+ lines.extend(route_lines)
+ return "\n".join(lines)
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
new file mode 100644
index 00000000..2083cf60
--- /dev/null
+++ b/cloudinit/settings.py
@@ -0,0 +1,57 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# Set and read for determining the cloud config file location
+CFG_ENV_NAME = "CLOUD_CFG"
+
+# This is expected to be a yaml formatted file
+CLOUD_CONFIG = '/etc/cloud/cloud.cfg'
+
+# What u get if no config is provided
+CFG_BUILTIN = {
+ # Datasources are probed in this listed order.
+ 'datasource_list': [
+ 'NoCloud',
+ 'ConfigDrive',
+ 'OVF',
+ 'MAAS',
+ 'Ec2',
+ 'CloudStack'
+ ],
+ 'def_log_file': '/var/log/cloud-init.log',
+ 'log_cfgs': [],
+ # user:group ownership applied to the log file.
+ 'syslog_fix_perms': 'syslog:adm',
+ 'system_info': {
+ 'paths': {
+ 'cloud_dir': '/var/lib/cloud',
+ 'templates_dir': '/etc/cloud/templates/',
+ },
+ 'distro': 'ubuntu',
+ },
+}
+
+# Valid frequencies of handlers/modules
+PER_INSTANCE = "once-per-instance"
+PER_ALWAYS = "always"
+PER_ONCE = "once"
+
+# Used to sanity check incoming handlers/modules frequencies
+FREQUENCIES = [PER_INSTANCE, PER_ALWAYS, PER_ONCE]
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
new file mode 100644
index 00000000..751bef4f
--- /dev/null
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -0,0 +1,147 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Cosmin Luta
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Cosmin Luta <q4break@gmail.com>
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from socket import inet_ntoa
+from struct import pack
+
+import os
+import time
+
+import boto.utils as boto_utils
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import url_helper as uhelp
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class DataSourceCloudStack(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed_dir = os.path.join(paths.seed_dir, 'cs')
+ # Cloudstack has its metadata/userdata URLs located at
+ # http://<default-gateway-ip>/latest/
+ self.api_ver = 'latest'
+ gw_addr = self.get_default_gateway()
+ if not gw_addr:
+ raise RuntimeError("No default gateway found!")
+ self.metadata_address = "http://%s/" % (gw_addr)
+
+ def get_default_gateway(self):
+ """ Returns the default gateway ip address in the dotted format
+ """
+ lines = util.load_file("/proc/net/route").splitlines()
+ for line in lines:
+ items = line.split("\t")
+ if items[1] == "00000000":
+ # Found the default route, get the gateway
+ gw = inet_ntoa(pack("<L", int(items[2], 16)))
+ LOG.debug("Found default route, gateway is %s", gw)
+ return gw
+ return None
+
+ def __str__(self):
+ return util.obj_name(self)
+
+ def _get_url_settings(self):
+ mcfg = self.ds_cfg
+ if not mcfg:
+ mcfg = {}
+ max_wait = 120
+ try:
+ max_wait = int(mcfg.get("max_wait", max_wait))
+ except Exception:
+ util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
+
+ if max_wait == 0:
+ return False
+
+ timeout = 50
+ try:
+ timeout = int(mcfg.get("timeout", timeout))
+ except Exception:
+ util.logexc(LOG, "Failed to get timeout, using %s", timeout)
+
+ return (max_wait, timeout)
+
+ def wait_for_metadata_service(self):
+ mcfg = self.ds_cfg
+ if not mcfg:
+ mcfg = {}
+
+ (max_wait, timeout) = self._get_url_settings()
+
+ urls = [self.metadata_address]
+ start_time = time.time()
+ url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
+ timeout=timeout, status_cb=LOG.warn)
+
+ if url:
+ LOG.debug("Using metadata source: '%s'", url)
+ else:
+ LOG.critical(("Giving up on waiting for the metadata from %s"
+ " after %s seconds"),
+ urls, int(time.time() - start_time))
+
+ return bool(url)
+
+ def get_data(self):
+ seed_ret = {}
+ if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
+ self.userdata_raw = seed_ret['user-data']
+ self.metadata = seed_ret['meta-data']
+ LOG.debug("Using seeded cloudstack data from: %s", self.seed_dir)
+ return True
+ try:
+ if not self.wait_for_metadata_service():
+ return False
+ start_time = time.time()
+ self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
+ None, self.metadata_address)
+ self.metadata = boto_utils.get_instance_metadata(self.api_ver,
+ self.metadata_address)
+ LOG.debug("Crawl of metadata service took %s seconds",
+ int(time.time() - start_time))
+ return True
+ except Exception:
+ util.logexc(LOG, ('Failed fetching from metadata '
+ 'service %s'), self.metadata_address)
+ return False
+
+ def get_instance_id(self):
+ return self.metadata['instance-id']
+
+ def get_availability_zone(self):
+ return self.metadata['availability-zone']
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ # Filters this module's datasources by the capabilities available.
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
new file mode 100644
index 00000000..320dd1d1
--- /dev/null
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -0,0 +1,226 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import json
+import os
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+# Various defaults/constants...
+DEFAULT_IID = "iid-dsconfigdrive"
+DEFAULT_MODE = 'pass'
+# Files probed (relative to the drive root) to decide whether a
+# directory/device is a config drive.
+CFG_DRIVE_FILES = [
+ "etc/network/interfaces",
+ "root/.ssh/authorized_keys",
+ "meta.js",
+]
+DEFAULT_METADATA = {
+ "instance-id": DEFAULT_IID,
+ "dsmode": DEFAULT_MODE,
+}
+# Environment variable that overrides config drive device detection.
+CFG_DRIVE_DEV_ENV = 'CLOUD_INIT_CONFIG_DRIVE_DEVICE'
+
+
+class DataSourceConfigDrive(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed = None
+ self.cfg = {}
+ self.dsmode = 'local'
+ self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
+
+ def __str__(self):
+ mstr = "%s [%s]" % (util.obj_name(self), self.dsmode)
+ mstr += "[seed=%s]" % (self.seed)
+ return mstr
+
+ def get_data(self):
+ found = None
+ md = {}
+ ud = ""
+
+ if os.path.isdir(self.seed_dir):
+ try:
+ (md, ud) = read_config_drive_dir(self.seed_dir)
+ found = self.seed_dir
+ except NonConfigDriveDir:
+ util.logexc(LOG, "Failed reading config drive from %s",
+ self.seed_dir)
+ if not found:
+ dev = find_cfg_drive_device()
+ if dev:
+ try:
+ (md, ud) = util.mount_cb(dev, read_config_drive_dir)
+ found = dev
+ except (NonConfigDriveDir, util.MountFailedError):
+ pass
+
+ if not found:
+ return False
+
+ if 'dsconfig' in md:
+ self.cfg = md['dscfg']
+
+ md = util.mergedict(md, DEFAULT_METADATA)
+
+ # Update interfaces and ifup only on the local datasource
+ # this way the DataSourceConfigDriveNet doesn't do it also.
+ if 'network-interfaces' in md and self.dsmode == "local":
+ LOG.debug("Updating network interfaces from config drive (%s)",
+ md['dsmode'])
+ self.distro.apply_network(md['network-interfaces'])
+
+ self.seed = found
+ self.metadata = md
+ self.userdata_raw = ud
+
+ if md['dsmode'] == self.dsmode:
+ return True
+
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
+ return False
+
+ def get_public_ssh_keys(self):
+ if not 'public-keys' in self.metadata:
+ return []
+ return self.metadata['public-keys']
+
+ # The data sources' config_obj is a cloud-config formated
+ # object that came to it from ways other than cloud-config
+ # because cloud-config content would be handled elsewhere
+ def get_config_obj(self):
+ return self.cfg
+
+
+class DataSourceConfigDriveNet(DataSourceConfigDrive):
+ # Same as DataSourceConfigDrive but runs at network time; claims
+ # the datasource when the drive's dsmode is 'net'.
+ def __init__(self, sys_cfg, distro, paths):
+ DataSourceConfigDrive.__init__(self, sys_cfg, distro, paths)
+ self.dsmode = 'net'
+
+
+class NonConfigDriveDir(Exception):
+ # Raised when a candidate directory does not look like a config
+ # drive (none of the expected files present, or malformed meta.js).
+ pass
+
+
+def find_cfg_drive_device():
+ """ Get the config drive device. Return a string like '/dev/vdb'
+ or None (if there is no non-root device attached). This does not
+ check the contents, only reports that if there *were* a config_drive
+ attached, it would be this device.
+ Note: per config_drive documentation, this is
+ "associated as the last available disk on the instance"
+ """
+
+ # This seems to be for debugging??
+ # NOTE(review): this env var bypasses the vfat probe entirely;
+ # presumably a test hook - confirm before relying on it.
+ if CFG_DRIVE_DEV_ENV in os.environ:
+ return os.environ[CFG_DRIVE_DEV_ENV]
+
+ # We are looking for a raw block device (sda, not sda1) with a vfat
+ # filesystem on it....
+ letters = "abcdefghijklmnopqrstuvwxyz"
+ devs = util.find_devs_with("TYPE=vfat")
+
+ # Filter out anything not ending in a letter (ignore partitions)
+ devs = [f for f in devs if f[-1] in letters]
+
+ # Sort them in reverse so "last" device is first
+ devs.sort(reverse=True)
+
+ if devs:
+ return devs[0]
+
+ return None
+
+
+def read_config_drive_dir(source_dir):
+ """
+ read_config_drive_dir(source_dir):
+ read source_dir, and return a tuple with metadata dict and user-data
+ string populated. If not a valid dir, raise a NonConfigDriveDir
+ """
+
+ # TODO: fix this for other operating systems...
+ # Ie: this is where https://fedorahosted.org/netcf/ or similar should
+ # be hooked in... (or could be)
+ # Collect whichever of the known config-drive files are present.
+ found = {}
+ for af in CFG_DRIVE_FILES:
+ fn = os.path.join(source_dir, af)
+ if os.path.isfile(fn):
+ found[af] = fn
+
+ if len(found) == 0:
+ raise NonConfigDriveDir("%s: %s" % (source_dir, "no files found"))
+
+ md = {}
+ ud = ""
+ keydata = ""
+ if "etc/network/interfaces" in found:
+ fn = found["etc/network/interfaces"]
+ md['network-interfaces'] = util.load_file(fn)
+
+ if "root/.ssh/authorized_keys" in found:
+ fn = found["root/.ssh/authorized_keys"]
+ keydata = util.load_file(fn)
+
+ meta_js = {}
+ if "meta.js" in found:
+ fn = found['meta.js']
+ content = util.load_file(fn)
+ try:
+ # Just check if its really json...
+ meta_js = json.loads(content)
+ if not isinstance(meta_js, (dict)):
+ raise TypeError("Dict expected for meta.js root node")
+ except (ValueError, TypeError) as e:
+ raise NonConfigDriveDir("%s: %s, %s" %
+ (source_dir, "invalid json in meta.js", e))
+ # Note: the raw JSON text (not the parsed dict) is kept here.
+ md['meta_js'] = content
+
+ # Key data override??
+ # meta.js 'public-keys' takes precedence over authorized_keys.
+ keydata = meta_js.get('public-keys', keydata)
+ if keydata:
+ lines = keydata.splitlines()
+ # Drop blank lines and '#' comments.
+ md['public-keys'] = [l for l in lines
+ if len(l) and not l.startswith("#")]
+
+ # Copy selected meta.js fields straight into the metadata dict.
+ for copy in ('dsmode', 'instance-id', 'dscfg'):
+ if copy in meta_js:
+ md[copy] = meta_js[copy]
+
+ if 'user-data' in meta_js:
+ ud = meta_js['user-data']
+
+ return (md, ud)
+
+
+# Used to match classes to dependencies
+# (the 'local' variant only needs the filesystem, the 'net' variant
+# additionally needs networking to be up)
+datasources = [
+ (DataSourceConfigDrive, (sources.DEP_FILESYSTEM, )),
+ (DataSourceConfigDriveNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
new file mode 100644
index 00000000..cb460de1
--- /dev/null
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -0,0 +1,265 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Hafliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import time
+
+import boto.utils as boto_utils
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import url_helper as uhelp
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+# Link-local address of the EC2 metadata service.
+DEF_MD_URL = "http://169.254.169.254"
+
+# Which version we are requesting of the ec2 metadata apis
+DEF_MD_VERSION = '2009-04-04'
+
+# Default metadata urls that will be used if none are provided
+# They will be checked for 'resolveability' and some of the
+# following may be discarded if they do not resolve
+DEF_MD_URLS = [DEF_MD_URL, "http://instance-data:8773"]
+
+
+class DataSourceEc2(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.metadata_address = DEF_MD_URL
+ self.seed_dir = os.path.join(paths.seed_dir, "ec2")
+ self.api_ver = DEF_MD_VERSION
+
+ def __str__(self):
+ return util.obj_name(self)
+
+ def get_data(self):
+ seed_ret = {}
+ if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
+ self.userdata_raw = seed_ret['user-data']
+ self.metadata = seed_ret['meta-data']
+ LOG.debug("Using seeded ec2 data from %s", self.seed_dir)
+ return True
+
+ try:
+ if not self.wait_for_metadata_service():
+ return False
+ start_time = time.time()
+ self.userdata_raw = boto_utils.get_instance_userdata(self.api_ver,
+ None, self.metadata_address)
+ self.metadata = boto_utils.get_instance_metadata(self.api_ver,
+ self.metadata_address)
+ LOG.debug("Crawl of metadata service took %s seconds",
+ int(time.time() - start_time))
+ return True
+ except Exception:
+ util.logexc(LOG, "Failed reading from metadata address %s",
+ self.metadata_address)
+ return False
+
+ def get_instance_id(self):
+ return self.metadata['instance-id']
+
+ def get_availability_zone(self):
+ return self.metadata['placement']['availability-zone']
+
+ def get_local_mirror(self):
+ return self.get_mirror_from_availability_zone()
+
+ def get_mirror_from_availability_zone(self, availability_zone=None):
+ # Availability is like 'us-west-1b' or 'eu-west-1a'
+ if availability_zone is None:
+ availability_zone = self.get_availability_zone()
+
+ if self.is_vpc():
+ return None
+
+ # Use the distro to get the mirror
+ if not availability_zone:
+ return None
+
+ mirror_tpl = self.distro.get_option('availability_zone_template')
+ if not mirror_tpl:
+ return None
+
+ tpl_params = {
+ 'zone': availability_zone.strip(),
+ }
+ mirror_url = mirror_tpl % (tpl_params)
+
+ (max_wait, timeout) = self._get_url_settings()
+ worked = uhelp.wait_for_url([mirror_url], max_wait=max_wait,
+ timeout=timeout, status_cb=LOG.warn)
+ if not worked:
+ return None
+
+ return mirror_url
+
+ def _get_url_settings(self):
+ mcfg = self.ds_cfg
+ if not mcfg:
+ mcfg = {}
+ max_wait = 120
+ try:
+ max_wait = int(mcfg.get("max_wait", max_wait))
+ except Exception:
+ util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
+
+ if max_wait == 0:
+ return False
+
+ timeout = 50
+ try:
+ timeout = int(mcfg.get("timeout", timeout))
+ except Exception:
+ util.logexc(LOG, "Failed to get timeout, using %s", timeout)
+
+ return (max_wait, timeout)
+
+ def wait_for_metadata_service(self):
+ mcfg = self.ds_cfg
+ if not mcfg:
+ mcfg = {}
+
+ (max_wait, timeout) = self._get_url_settings()
+
+ # Remove addresses from the list that wont resolve.
+ mdurls = mcfg.get("metadata_urls", DEF_MD_URLS)
+ filtered = [x for x in mdurls if util.is_resolvable_url(x)]
+
+ if set(filtered) != set(mdurls):
+ LOG.debug("Removed the following from metadata urls: %s",
+ list((set(mdurls) - set(filtered))))
+
+ if len(filtered):
+ mdurls = filtered
+ else:
+ LOG.warn("Empty metadata url list! using default list")
+ mdurls = DEF_MD_URLS
+
+ urls = []
+ url2base = {}
+ for url in mdurls:
+ cur = "%s/%s/meta-data/instance-id" % (url, self.api_ver)
+ urls.append(cur)
+ url2base[cur] = url
+
+ start_time = time.time()
+ url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
+ timeout=timeout, status_cb=LOG.warn)
+
+ if url:
+ LOG.debug("Using metadata source: '%s'", url2base[url])
+ else:
+ LOG.critical("Giving up on md from %s after %s seconds",
+ urls, int(time.time() - start_time))
+
+ self.metadata_address = url2base.get(url)
+ return bool(url)
+
+ def _remap_device(self, short_name):
+ # LP: #611137
+ # the metadata service may believe that devices are named 'sda'
+ # when the kernel named them 'vda' or 'xvda'
+ # we want to return the correct value for what will actually
+ # exist in this instance
+ mappings = {"sd": ("vd", "xvd")}
+ for (nfrom, tlist) in mappings.iteritems():
+ if not short_name.startswith(nfrom):
+ continue
+ for nto in tlist:
+ cand = "/dev/%s%s" % (nto, short_name[len(nfrom):])
+ if os.path.exists(cand):
+ return cand
+ return None
+
+ def device_name_to_device(self, name):
+ # Consult metadata service, that has
+ # ephemeral0: sdb
+ # and return 'sdb' for input 'ephemeral0'
+ if 'block-device-mapping' not in self.metadata:
+ return None
+
+ # Example:
+ # 'block-device-mapping':
+ # {'ami': '/dev/sda1',
+ # 'ephemeral0': '/dev/sdb',
+ # 'root': '/dev/sda1'}
+ found = None
+ bdm_items = self.metadata['block-device-mapping'].iteritems()
+ for (entname, device) in bdm_items:
+ if entname == name:
+ found = device
+ break
+ # LP: #513842 mapping in Euca has 'ephemeral' not 'ephemeral0'
+ if entname == "ephemeral" and name == "ephemeral0":
+ found = device
+
+ if found is None:
+ LOG.debug("Unable to convert %s to a device", name)
+ return None
+
+ ofound = found
+ if not found.startswith("/"):
+ found = "/dev/%s" % found
+
+ if os.path.exists(found):
+ return found
+
+ remapped = self._remap_device(os.path.basename(found))
+ if remapped:
+ LOG.debug("Remapped device name %s => %s", (found, remapped))
+ return remapped
+
+ # On t1.micro, ephemeral0 will appear in block-device-mapping from
+ # metadata, but it will not exist on disk (and never will)
+ # at this point, we've verified that the path did not exist
+ # in the special case of 'ephemeral0' return None to avoid bogus
+ # fstab entry (LP: #744019)
+ if name == "ephemeral0":
+ return None
+ return ofound
+
+ def is_vpc(self):
+ # See: https://bugs.launchpad.net/ubuntu/+source/cloud-init/+bug/615545
+ # Detect that the machine was launched in a VPC.
+ # But I did notice that when in a VPC, meta-data
+ # does not have public-ipv4 and public-hostname
+ # listed as a possibility.
+ ph = "public-hostname"
+ p4 = "public-ipv4"
+ if ((ph not in self.metadata or self.metadata[ph] == "") and
+ (p4 not in self.metadata or self.metadata[p4] == "")):
+ return True
+ return False
+
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ # Filters this module's datasources by the capabilities available.
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
new file mode 100644
index 00000000..f16d5c21
--- /dev/null
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -0,0 +1,264 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import errno
+import oauth.oauth as oauth
+import os
+import time
+import urllib2
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import url_helper as uhelp
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+MD_VERSION = "2012-03-01"
+
+
+class DataSourceMAAS(sources.DataSource):
+    """
+    DataSourceMAAS reads instance information from MAAS.
+    Given a config metadata_url, and oauth tokens, it expects to find
+    files under the root named:
+      instance-id
+      user-data
+      hostname
+    """
+    def __init__(self, sys_cfg, distro, paths):
+        sources.DataSource.__init__(self, sys_cfg, distro, paths)
+        self.base_url = None
+        self.seed_dir = os.path.join(paths.seed_dir, 'maas')
+
+    def __str__(self):
+        return "%s [%s]" % (util.obj_name(self), self.base_url)
+
+    def get_data(self):
+        mcfg = self.ds_cfg
+
+        try:
+            (userdata, metadata) = read_maas_seed_dir(self.seed_dir)
+            self.userdata_raw = userdata
+            self.metadata = metadata
+            self.base_url = self.seed_dir
+            return True
+        except MAASSeedDirNone:
+            pass
+        except MAASSeedDirMalformed as exc:
+            LOG.warn("%s was malformed: %s", self.seed_dir, exc)  # lazy args, not '%'
+            raise
+
+        # If there is no metadata_url, then we're not configured
+        url = mcfg.get('metadata_url', None)
+        if not url:
+            return False
+
+        try:
+            if not self.wait_for_metadata_service(url):
+                return False
+
+            self.base_url = url
+
+            (userdata, metadata) = read_maas_seed_url(self.base_url,
+                                                      self.md_headers)
+            self.userdata_raw = userdata
+            self.metadata = metadata
+            return True
+        except Exception:
+            util.logexc(LOG, "Failed fetching metadata from url %s", url)
+            return False
+
+    def md_headers(self, url):
+        mcfg = self.ds_cfg
+
+        # If we are missing token_key, token_secret or consumer_key
+        # then just do non-authed requests
+        for required in ('token_key', 'token_secret', 'consumer_key'):
+            if required not in mcfg:
+                return {}
+
+        consumer_secret = mcfg.get('consumer_secret', "")
+        return oauth_headers(url=url,
+                             consumer_key=mcfg['consumer_key'],
+                             token_key=mcfg['token_key'],
+                             token_secret=mcfg['token_secret'],
+                             consumer_secret=consumer_secret)
+
+    def wait_for_metadata_service(self, url):
+        mcfg = self.ds_cfg
+
+        max_wait = 120
+        try:
+            max_wait = int(mcfg.get("max_wait", max_wait))
+        except Exception:
+            util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
+
+        if max_wait == 0:
+            return False
+
+        timeout = 50
+        try:
+            if "timeout" in mcfg:  # fix: was `if timeout in mcfg` (int key, never true)
+                timeout = int(mcfg.get("timeout", timeout))
+        except Exception:
+            LOG.warn("Failed to get timeout, using %s", timeout)
+
+        starttime = time.time()
+        check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
+        urls = [check_url]
+        url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
+                                 timeout=timeout, status_cb=LOG.warn,
+                                 headers_cb=self.md_headers)
+
+        if url:
+            LOG.debug("Using metadata source: '%s'", url)
+        else:
+            LOG.critical("Giving up on md from %s after %i seconds",
+                         urls, int(time.time() - starttime))
+
+        return bool(url)
+
+
+def read_maas_seed_dir(seed_d):
+    """
+    Return user-data and metadata for a maas seed dir in seed_d.
+    Expected format of seed_d are the following files:
+      * instance-id
+      * local-hostname
+      * user-data
+    """
+    if not os.path.isdir(seed_d):
+        raise MAASSeedDirNone("%s: not a directory" % seed_d)  # fix: interpolate path
+
+    files = ('local-hostname', 'instance-id', 'user-data', 'public-keys')
+    md = {}
+    for fname in files:
+        try:
+            md[fname] = util.load_file(os.path.join(seed_d, fname))
+        except IOError as e:
+            if e.errno != errno.ENOENT:
+                raise
+
+    return check_seed_contents(md, seed_d)
+
+
+def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
+                       version=MD_VERSION):
+    """
+    Read the maas datasource at seed_url.
+    header_cb is a method that should return a headers dictionary that will
+    be given to urllib2.Request()
+
+    Expected format of seed_url is the following files:
+      * <seed_url>/<version>/meta-data/instance-id
+      * <seed_url>/<version>/meta-data/local-hostname
+      * <seed_url>/<version>/user-data
+    """
+    base_url = "%s/%s" % (seed_url, version)
+    file_order = [
+        'local-hostname',
+        'instance-id',
+        'public-keys',
+        'user-data',
+    ]
+    files = {
+        'local-hostname': "%s/%s" % (base_url, 'meta-data/local-hostname'),
+        'instance-id': "%s/%s" % (base_url, 'meta-data/instance-id'),
+        'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
+        'user-data': "%s/%s" % (base_url, 'user-data'),
+    }
+    md = {}
+    for name in file_order:
+        url = files.get(name)
+        if header_cb:
+            headers = header_cb(url)
+        else:
+            headers = {}
+        try:
+            resp = uhelp.readurl(url, headers=headers, timeout=timeout)
+            if resp.ok():
+                md[name] = str(resp)
+            else:
+                LOG.warn(("Fetching from %s resulted in"
+                          " an invalid http code %s"), url, resp.code)
+        except urllib2.HTTPError as e:
+            if e.code != 404:  # 404 just means this (optional) file is absent
+                raise
+    return check_seed_contents(md, seed_url)
+
+
+def check_seed_contents(content, seed):
+    """Validate that 'content' provides the files required of a data source.
+
+    Returns a (userdata, metadata) tuple, or raises MAASSeedDirNone
+    (no data files at all) or MAASSeedDirMalformed (required file missing).
+    """
+    md_required = ('instance-id', 'local-hostname')
+    if len(content) == 0:
+        raise MAASSeedDirNone("%s: no data files found" % seed)
+
+    found = list(content.keys())
+    missing = [k for k in md_required if k not in found]
+    if len(missing):
+        raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
+
+    userdata = content.get('user-data', "")
+    md = {}
+    for (key, val) in content.iteritems():
+        if key == 'user-data':
+            continue
+        md[key] = val
+
+    return (userdata, md)
+
+
+def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret):
+    consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
+    token = oauth.OAuthToken(token_key, token_secret)
+    params = {
+        'oauth_version': "1.0",
+        'oauth_nonce': oauth.generate_nonce(),
+        'oauth_timestamp': int(time.time()),
+        'oauth_token': token.key,
+        'oauth_consumer_key': consumer.key,
+    }
+    req = oauth.OAuthRequest(http_url=url, parameters=params)
+    req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(),  # MAAS accepts PLAINTEXT signing
+                     consumer, token)
+    return req.to_header()
+
+
+class MAASSeedDirNone(Exception):
+    pass  # seed dir absent/empty; caller falls back to metadata_url
+
+
+class MAASSeedDirMalformed(Exception):
+    pass  # seed dir present but required files are missing
+
+
+# Used to match classes to dependencies
+datasources = [
+    (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index e8c56b8f..bed500a2 100644
--- a/cloudinit/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -2,9 +2,11 @@
#
# Copyright (C) 2009-2010 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Hafliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -18,33 +20,34 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.DataSource as DataSource
-
-from cloudinit import seeddir as base_seeddir
-from cloudinit import log
-import cloudinit.util as util
import errno
-import subprocess
+import os
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+LOG = logging.getLogger(__name__)
-class DataSourceNoCloud(DataSource.DataSource):
- metadata = None
- userdata = None
- userdata_raw = None
- supported_seed_starts = ("/", "file://")
- dsmode = "local"
- seed = None
- cmdline_id = "ds=nocloud"
- seeddir = base_seeddir + '/nocloud'
+
+class DataSourceNoCloud(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.dsmode = 'local'
+ self.seed = None
+ self.cmdline_id = "ds=nocloud"
+ self.seed_dir = os.path.join(paths.seed_dir, 'nocloud')
+ self.supported_seed_starts = ("/", "file://")
def __str__(self):
- mstr = "DataSourceNoCloud"
- mstr = mstr + " [seed=%s]" % self.seed
- return(mstr)
+ mstr = "%s [seed=%s][dsmode=%s]" % (util.obj_name(self),
+ self.seed, self.dsmode)
+ return mstr
def get_data(self):
defaults = {
- "instance-id": "nocloud", "dsmode": self.dsmode
+ "instance-id": "nocloud",
+ "dsmode": self.dsmode,
}
found = []
@@ -52,24 +55,24 @@ class DataSourceNoCloud(DataSource.DataSource):
ud = ""
try:
- # parse the kernel command line, getting data passed in
+ # Parse the kernel command line, getting data passed in
if parse_cmdline_data(self.cmdline_id, md):
found.append("cmdline")
except:
- util.logexc(log)
+ util.logexc(LOG, "Unable to parse command line data")
return False
- # check to see if the seeddir has data.
+ # Check to see if the seed dir has data.
seedret = {}
- if util.read_optional_seed(seedret, base=self.seeddir + "/"):
+ if util.read_optional_seed(seedret, base=self.seed_dir + "/"):
md = util.mergedict(md, seedret['meta-data'])
ud = seedret['user-data']
- found.append(self.seeddir)
- log.debug("using seeded cache data in %s" % self.seeddir)
+ found.append(self.seed_dir)
+ LOG.debug("Using seeded cache data from %s", self.seed_dir)
- # if the datasource config had a 'seedfrom' entry, then that takes
+ # If the datasource config had a 'seedfrom' entry, then that takes
# precedence over a 'seedfrom' that was found in a filesystem
- # but not over external medi
+ # but not over external media
if 'seedfrom' in self.ds_cfg and self.ds_cfg['seedfrom']:
found.append("ds_config")
md["seedfrom"] = self.ds_cfg['seedfrom']
@@ -83,35 +86,37 @@ class DataSourceNoCloud(DataSource.DataSource):
for dev in devlist:
try:
- (newmd, newud) = util.mount_callback_umount(dev,
- util.read_seeded)
+ LOG.debug("Attempting to use data from %s", dev)
+
+ (newmd, newud) = util.mount_cb(dev, util.read_seeded)
md = util.mergedict(newmd, md)
ud = newud
- # for seed from a device, the default mode is 'net'.
+ # For seed from a device, the default mode is 'net'.
# that is more likely to be what is desired.
# If they want dsmode of local, then they must
# specify that.
if 'dsmode' not in md:
md['dsmode'] = "net"
- log.debug("using data from %s" % dev)
+ LOG.debug("Using data from %s", dev)
found.append(dev)
break
- except OSError, e:
+ except OSError as e:
if e.errno != errno.ENOENT:
raise
- except util.mountFailedError:
- log.warn("Failed to mount %s when looking for seed" % dev)
+ except util.MountFailedError:
+ util.logexc(LOG, ("Failed to mount %s"
+ " when looking for data"), dev)
- # there was no indication on kernel cmdline or data
+ # There was no indication on kernel cmdline or data
# in the seeddir suggesting this handler should be used.
if len(found) == 0:
return False
seeded_interfaces = None
- # the special argument "seedfrom" indicates we should
+ # The special argument "seedfrom" indicates we should
# attempt to seed the userdata / metadata from its value
# its primarily value is in allowing the user to type less
# on the command line, ie: ds=nocloud;s=http://bit.ly/abcdefg
@@ -123,57 +128,46 @@ class DataSourceNoCloud(DataSource.DataSource):
seedfound = proto
break
if not seedfound:
- log.debug("seed from %s not supported by %s" %
- (seedfrom, self.__class__))
+ LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
if 'network-interfaces' in md:
seeded_interfaces = self.dsmode
- # this could throw errors, but the user told us to do it
+ # This could throw errors, but the user told us to do it
# so if errors are raised, let them raise
(md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
- log.debug("using seeded cache data from %s" % seedfrom)
+ LOG.debug("Using seeded cache data from %s", seedfrom)
- # values in the command line override those from the seed
+ # Values in the command line override those from the seed
md = util.mergedict(md, md_seed)
found.append(seedfrom)
+ # Now that we have exhausted any other places merge in the defaults
md = util.mergedict(md, defaults)
- # update the network-interfaces if metadata had 'network-interfaces'
+ # Update the network-interfaces if metadata had 'network-interfaces'
# entry and this is the local datasource, or 'seedfrom' was used
# and the source of the seed was self.dsmode
# ('local' for NoCloud, 'net' for NoCloudNet')
if ('network-interfaces' in md and
(self.dsmode in ("local", seeded_interfaces))):
- log.info("updating network interfaces from nocloud")
-
- util.write_file("/etc/network/interfaces",
- md['network-interfaces'])
- try:
- (out, err) = util.subp(['ifup', '--all'])
- if len(out) or len(err):
- log.warn("ifup --all had stderr: %s" % err)
-
- except subprocess.CalledProcessError as exc:
- log.warn("ifup --all failed: %s" % (exc.output[1]))
-
- self.seed = ",".join(found)
- self.metadata = md
- self.userdata_raw = ud
+ LOG.debug("Updating network interfaces from %s", self)
+ self.distro.apply_network(md['network-interfaces'])
if md['dsmode'] == self.dsmode:
+ self.seed = ",".join(found)
+ self.metadata = md
+ self.userdata_raw = ud
return True
- log.debug("%s: not claiming datasource, dsmode=%s" %
- (self, md['dsmode']))
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
return False
-# returns true or false indicating if cmdline indicated
+# Returns true or false indicating if cmdline indicated
# that this module should be used
-# example cmdline:
+# Example cmdline:
# root=LABEL=uec-rootfs ro ds=nocloud
def parse_cmdline_data(ds_id, fill, cmdline=None):
if cmdline is None:
@@ -210,23 +204,25 @@ def parse_cmdline_data(ds_id, fill, cmdline=None):
k = s2l[k]
fill[k] = v
- return(True)
+ return True
class DataSourceNoCloudNet(DataSourceNoCloud):
- cmdline_id = "ds=nocloud-net"
- supported_seed_starts = ("http://", "https://", "ftp://")
- seeddir = base_seeddir + '/nocloud-net'
- dsmode = "net"
+ def __init__(self, sys_cfg, distro, paths):
+ DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
+ self.cmdline_id = "ds=nocloud-net"
+ self.supported_seed_starts = ("http://", "https://", "ftp://")
+ self.seed_dir = os.path.join(paths.seed_dir, 'nocloud-net')
+ self.dsmode = "net"
-datasources = (
- (DataSourceNoCloud, (DataSource.DEP_FILESYSTEM, )),
- (DataSourceNoCloudNet,
- (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
-)
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
+ (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
-# return a list of data sources that match this set of dependencies
+# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
- return(DataSource.list_from_depends(depends, datasources))
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index a0b1b518..7728b36f 100644
--- a/cloudinit/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -2,9 +2,11 @@
#
# Copyright (C) 2011 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
# Author: Juerg Hafliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -18,33 +20,30 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import cloudinit.DataSource as DataSource
-
-from cloudinit import seeddir as base_seeddir
-from cloudinit import log
-import cloudinit.util as util
-import os.path
-import os
from xml.dom import minidom
+
import base64
+import os
import re
-import tempfile
-import subprocess
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
-class DataSourceOVF(DataSource.DataSource):
- seed = None
- seeddir = base_seeddir + '/ovf'
- environment = None
- cfg = {}
- userdata_raw = None
- metadata = None
- supported_seed_starts = ("/", "file://")
+
+class DataSourceOVF(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.seed = None
+ self.seed_dir = os.path.join(paths.seed_dir, 'ovf')
+ self.environment = None
+ self.cfg = {}
+ self.supported_seed_starts = ("/", "file://")
def __str__(self):
- mstr = "DataSourceOVF"
- mstr = mstr + " [seed=%s]" % self.seed
- return(mstr)
+ return "%s [seed=%s]" % (util.obj_name(self), self.seed)
def get_data(self):
found = []
@@ -52,26 +51,24 @@ class DataSourceOVF(DataSource.DataSource):
ud = ""
defaults = {
- "instance-id": "iid-dsovf"
+ "instance-id": "iid-dsovf",
}
- (seedfile, contents) = get_ovf_env(base_seeddir)
+ (seedfile, contents) = get_ovf_env(self.paths.seed_dir)
if seedfile:
- # found a seed dir
- seed = "%s/%s" % (base_seeddir, seedfile)
+ # Found a seed dir
+ seed = os.path.join(self.paths.seed_dir, seedfile)
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
-
found.append(seed)
else:
np = {'iso': transport_iso9660,
'vmware-guestd': transport_vmware_guestd, }
name = None
- for name, transfunc in np.iteritems():
+ for (name, transfunc) in np.iteritems():
(contents, _dev, _fname) = transfunc()
if contents:
break
-
if contents:
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
@@ -89,17 +86,19 @@ class DataSourceOVF(DataSource.DataSource):
seedfound = proto
break
if not seedfound:
- log.debug("seed from %s not supported by %s" %
- (seedfrom, self.__class__))
+ LOG.debug("Seed from %s not supported by %s",
+ seedfrom, self)
return False
(md_seed, ud) = util.read_seeded(seedfrom, timeout=None)
- log.debug("using seeded cache data from %s" % seedfrom)
+ LOG.debug("Using seeded cache data from %s", seedfrom)
md = util.mergedict(md, md_seed)
found.append(seedfrom)
+ # Now that we have exhausted any other places merge in the defaults
md = util.mergedict(md, defaults)
+
self.seed = ",".join(found)
self.metadata = md
self.userdata_raw = ud
@@ -108,31 +107,37 @@ class DataSourceOVF(DataSource.DataSource):
def get_public_ssh_keys(self):
if not 'public-keys' in self.metadata:
- return([])
- return([self.metadata['public-keys'], ])
+ return []
+ pks = self.metadata['public-keys']
+ if isinstance(pks, (list)):
+ return pks
+ else:
+ return [pks]
- # the data sources' config_obj is a cloud-config formated
+ # The data sources' config_obj is a cloud-config formatted
# object that came to it from ways other than cloud-config
# because cloud-config content would be handled elsewhere
def get_config_obj(self):
- return(self.cfg)
+ return self.cfg
class DataSourceOVFNet(DataSourceOVF):
- seeddir = base_seeddir + '/ovf-net'
- supported_seed_starts = ("http://", "https://", "ftp://")
+ def __init__(self, sys_cfg, distro, paths):
+ DataSourceOVF.__init__(self, sys_cfg, distro, paths)
+ self.seed_dir = os.path.join(paths.seed_dir, 'ovf-net')
+ self.supported_seed_starts = ("http://", "https://", "ftp://")
-# this will return a dict with some content
-# meta-data, user-data
+# This will return a dict with some content
+# meta-data, user-data, some config
def read_ovf_environment(contents):
- props = getProperties(contents)
+ props = get_properties(contents)
md = {}
cfg = {}
ud = ""
- cfg_props = ['password', ]
+ cfg_props = ['password']
md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
- for prop, val in props.iteritems():
+ for (prop, val) in props.iteritems():
if prop == 'hostname':
prop = "local-hostname"
if prop in md_props:
@@ -144,23 +149,25 @@ def read_ovf_environment(contents):
ud = base64.decodestring(val)
except:
ud = val
- return(md, ud, cfg)
+ return (md, ud, cfg)
-# returns tuple of filename (in 'dirname', and the contents of the file)
+# Returns tuple of filename (in 'dirname', and the contents of the file)
# on "not found", returns 'None' for filename and False for contents
def get_ovf_env(dirname):
env_names = ("ovf-env.xml", "ovf_env.xml", "OVF_ENV.XML", "OVF-ENV.XML")
for fname in env_names:
- if os.path.isfile("%s/%s" % (dirname, fname)):
- fp = open("%s/%s" % (dirname, fname))
- contents = fp.read()
- fp.close()
- return(fname, contents)
- return(None, False)
+ full_fn = os.path.join(dirname, fname)
+ if os.path.isfile(full_fn):
+ try:
+ contents = util.load_file(full_fn)
+ return (fname, contents)
+ except:
+ util.logexc(LOG, "Failed loading ovf file %s", full_fn)
+ return (None, False)
-# transport functions take no input and return
+# Transport functions take no input and return
# a 3 tuple of content, path, filename
def transport_iso9660(require_iso=True):
@@ -173,79 +180,46 @@ def transport_iso9660(require_iso=True):
devname_regex = os.environ.get(envname, default_regex)
cdmatch = re.compile(devname_regex)
- # go through mounts to see if it was already mounted
- fp = open("/proc/mounts")
- mounts = fp.readlines()
- fp.close()
-
- mounted = {}
- for mpline in mounts:
- (dev, mp, fstype, _opts, _freq, _passno) = mpline.split()
- mounted[dev] = (dev, fstype, mp, False)
- mp = mp.replace("\\040", " ")
+ # Go through mounts to see if it was already mounted
+ mounts = util.mounts()
+ for (dev, info) in mounts.iteritems():
+ fstype = info['fstype']
if fstype != "iso9660" and require_iso:
continue
-
- if cdmatch.match(dev[5:]) == None: # take off '/dev/'
+ if cdmatch.match(dev[5:]) is None: # take off '/dev/'
continue
-
+ mp = info['mountpoint']
(fname, contents) = get_ovf_env(mp)
if contents is not False:
- return(contents, dev, fname)
-
- tmpd = None
- dvnull = None
+ return (contents, dev, fname)
devs = os.listdir("/dev/")
devs.sort()
-
for dev in devs:
- fullp = "/dev/%s" % dev
+ fullp = os.path.join("/dev/", dev)
- if fullp in mounted or not cdmatch.match(dev) or os.path.isdir(fullp):
+ if (fullp in mounts or
+ not cdmatch.match(dev) or os.path.isdir(fullp)):
continue
- fp = None
try:
- fp = open(fullp, "rb")
- fp.read(512)
- fp.close()
+ # See if we can read anything at all...??
+ with open(fullp, 'rb') as fp:
+ fp.read(512)
except:
- if fp:
- fp.close()
continue
- if tmpd is None:
- tmpd = tempfile.mkdtemp()
- if dvnull is None:
- try:
- dvnull = open("/dev/null")
- except:
- pass
-
- cmd = ["mount", "-o", "ro", fullp, tmpd]
- if require_iso:
- cmd.extend(('-t', 'iso9660'))
-
- rc = subprocess.call(cmd, stderr=dvnull, stdout=dvnull, stdin=dvnull)
- if rc:
+ try:
+ (fname, contents) = util.mount_cb(fullp,
+ get_ovf_env, mtype="iso9660")
+ except util.MountFailedError:
+ util.logexc(LOG, "Failed mounting %s", fullp)
continue
- (fname, contents) = get_ovf_env(tmpd)
-
- subprocess.call(["umount", tmpd])
-
if contents is not False:
- os.rmdir(tmpd)
- return(contents, fullp, fname)
-
- if tmpd:
- os.rmdir(tmpd)
-
- if dvnull:
- dvnull.close()
+ return (contents, fullp, fname)
- return(False, None, None)
+ return (False, None, None)
def transport_vmware_guestd():
@@ -259,74 +233,61 @@ def transport_vmware_guestd():
# # would need to error check here and see why this failed
# # to know if log/error should be raised
# return(False, None, None)
- return(False, None, None)
+ return (False, None, None)
-def findChild(node, filter_func):
+def find_child(node, filter_func):
ret = []
if not node.hasChildNodes():
return ret
for child in node.childNodes:
if filter_func(child):
ret.append(child)
- return(ret)
+ return ret
-def getProperties(environString):
- dom = minidom.parseString(environString)
+def get_properties(contents):
+
+ dom = minidom.parseString(contents)
if dom.documentElement.localName != "Environment":
- raise Exception("No Environment Node")
+ raise XmlError("No Environment Node")
if not dom.documentElement.hasChildNodes():
- raise Exception("No Child Nodes")
+ raise XmlError("No Child Nodes")
envNsURI = "http://schemas.dmtf.org/ovf/environment/1"
# could also check here that elem.namespaceURI ==
# "http://schemas.dmtf.org/ovf/environment/1"
- propSections = findChild(dom.documentElement,
+ propSections = find_child(dom.documentElement,
lambda n: n.localName == "PropertySection")
if len(propSections) == 0:
- raise Exception("No 'PropertySection's")
+ raise XmlError("No 'PropertySection's")
props = {}
- propElems = findChild(propSections[0], lambda n: n.localName == "Property")
+ propElems = find_child(propSections[0],
+ (lambda n: n.localName == "Property"))
for elem in propElems:
key = elem.attributes.getNamedItemNS(envNsURI, "key").value
val = elem.attributes.getNamedItemNS(envNsURI, "value").value
props[key] = val
- return(props)
+ return props
+
+
+class XmlError(Exception):
+ pass
+# Used to match classes to dependencies
datasources = (
- (DataSourceOVF, (DataSource.DEP_FILESYSTEM, )),
- (DataSourceOVFNet,
- (DataSource.DEP_FILESYSTEM, DataSource.DEP_NETWORK)),
+ (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
)
-# return a list of data sources that match this set of dependencies
+# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
- return(DataSource.list_from_depends(depends, datasources))
-
-
-if __name__ == "__main__":
- def main():
- import sys
- envStr = open(sys.argv[1]).read()
- props = getProperties(envStr)
- import pprint
- pprint.pprint(props)
-
- md, ud, cfg = read_ovf_environment(envStr)
- print "=== md ==="
- pprint.pprint(md)
- print "=== ud ==="
- pprint.pprint(ud)
- print "=== cfg ==="
- pprint.pprint(cfg)
-
- main()
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
new file mode 100644
index 00000000..b25724a5
--- /dev/null
+++ b/cloudinit/sources/__init__.py
@@ -0,0 +1,223 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import abc
+
+from cloudinit import importer
+from cloudinit import log as logging
+from cloudinit import user_data as ud
+from cloudinit import util
+
+DEP_FILESYSTEM = "FILESYSTEM"
+DEP_NETWORK = "NETWORK"
+DS_PREFIX = 'DataSource'
+
+LOG = logging.getLogger(__name__)
+
+
+class DataSourceNotFoundException(Exception):
+    pass  # raised by find_source() when no candidate source yields data
+
+
+class DataSource(object):
+
+    __metaclass__ = abc.ABCMeta
+
+    def __init__(self, sys_cfg, distro, paths, ud_proc=None):
+        self.sys_cfg = sys_cfg
+        self.distro = distro
+        self.paths = paths
+        self.userdata = None
+        self.metadata = None
+        self.userdata_raw = None
+        name = util.obj_name(self)
+        if name.startswith(DS_PREFIX):
+            name = name[len(DS_PREFIX):]
+        self.ds_cfg = util.get_cfg_by_path(self.sys_cfg,
+                                           ("datasource", name), {})
+        if not ud_proc:
+            self.ud_proc = ud.UserDataProcessor(self.paths)
+        else:
+            self.ud_proc = ud_proc
+
+    def get_userdata(self):
+        if self.userdata is None:
+            raw_data = self.get_userdata_raw()
+            self.userdata = self.ud_proc.process(raw_data)
+        return self.userdata
+
+    def get_userdata_raw(self):
+        return self.userdata_raw
+
+    # the data sources' config_obj is a cloud-config formated
+    # object that came to it from ways other than cloud-config
+    # because cloud-config content would be handled elsewhere
+    def get_config_obj(self):
+        return {}
+
+    def get_public_ssh_keys(self):
+        keys = []
+
+        if not self.metadata or 'public-keys' not in self.metadata:
+            return keys
+
+        if isinstance(self.metadata['public-keys'], (basestring, str)):
+            return str(self.metadata['public-keys']).splitlines()
+
+        if isinstance(self.metadata['public-keys'], (list, set)):
+            return list(self.metadata['public-keys'])
+
+        if isinstance(self.metadata['public-keys'], (dict)):
+            for (_keyname, klist) in self.metadata['public-keys'].iteritems():
+                # lp:506332 uec metadata service responds with
+                # data that makes boto populate a string for 'klist' rather
+                # than a list.
+                if isinstance(klist, (str, basestring)):
+                    klist = [klist]
+                if isinstance(klist, (list, set)):
+                    for pkey in klist:
+                        # There is an empty string at
+                        # the end of the keylist, trim it
+                        if pkey:
+                            keys.append(pkey)
+
+        return keys
+
+    def device_name_to_device(self, _name):
+        # translate a 'name' to a device
+        # the primary function at this point is on ec2
+        # to consult metadata service, that has
+        #  ephemeral0: sdb
+        # and return 'sdb' for input 'ephemeral0'
+        return None
+
+    def get_locale(self):
+        return 'en_US.UTF-8'
+
+    def get_local_mirror(self):
+        # ??
+        return None
+
+    def get_instance_id(self):
+        if not self.metadata or 'instance-id' not in self.metadata:
+            # Return a magic not really instance id string
+            return "iid-datasource"
+        return str(self.metadata['instance-id'])
+
+    def get_hostname(self, fqdn=False):
+        defdomain = "localdomain"
+        defhost = "localhost"
+        domain = defdomain
+
+        if not self.metadata or not 'local-hostname' in self.metadata:
+            # this is somewhat questionable really.
+            # the cloud datasource was asked for a hostname
+            # and didn't have one. raising error might be more appropriate
+            # but instead, basically look up the existing hostname
+            toks = []
+            hostname = util.get_hostname()
+            hosts_fqdn = util.get_fqdn_from_hosts(hostname)  # fix: don't clobber 'fqdn' arg
+            if hosts_fqdn and hosts_fqdn.find(".") > 0:
+                toks = str(hosts_fqdn).split(".")
+            elif hostname:
+                toks = [hostname, defdomain]
+            else:
+                toks = [defhost, defdomain]
+        else:
+            # if there is an ipv4 address in 'local-hostname', then
+            # make up a hostname (LP: #475354) in format ip-xx.xx.xx.xx
+            lhost = self.metadata['local-hostname']
+            if util.is_ipv4(lhost):
+                toks = ["ip-%s" % lhost.replace(".", "-")]  # fix: list, not str
+            else:
+                toks = lhost.split(".")
+
+        if len(toks) > 1:
+            hostname = toks[0]
+            domain = '.'.join(toks[1:])
+        else:
+            hostname = toks[0]
+
+        if fqdn:
+            return "%s.%s" % (hostname, domain)
+        else:
+            return hostname
+
+
+def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
+    ds_list = list_sources(cfg_list, ds_deps, pkg_list)
+    ds_names = [util.obj_name(f) for f in ds_list]
+    LOG.debug("Searching for data source in: %s", ds_names)
+
+    for cls in ds_list:
+        try:
+            LOG.debug("Seeing if we can get any data from %s", cls)
+            s = cls(sys_cfg, distro, paths)
+            if s.get_data():
+                return (s, util.obj_name(cls))  # first source with data wins
+        except Exception:
+            util.logexc(LOG, "Getting data from %s failed", cls)
+
+    msg = ("Did not find any data source,"
+           " searched classes: (%s)") % (", ".join(ds_names))
+    raise DataSourceNotFoundException(msg)
+
+
+# Return a list of classes that have the same depends as 'depends'
+# iterate through cfg_list, loading "DataSource*" modules
+# and calling their "get_datasource_list".
+# Return an ordered list of classes that match (if any)
+def list_sources(cfg_list, depends, pkg_list):
+    src_list = []
+    LOG.debug(("Looking for data source in: %s,"
+               " via packages %s that matches dependencies %s"),
+              cfg_list, pkg_list, depends)
+    for ds_name in cfg_list:
+        if not ds_name.startswith(DS_PREFIX):
+            ds_name = '%s%s' % (DS_PREFIX, ds_name)
+        m_locs = importer.find_module(ds_name,
+                                      pkg_list,
+                                      ['get_datasource_list'])
+        for m_loc in m_locs:
+            mod = importer.import_module(m_loc)
+            lister = getattr(mod, "get_datasource_list")
+            matches = lister(depends)
+            if matches:
+                src_list.extend(matches)
+                break
+    return src_list
+
+
+# 'depends' is a list of dependencies (DEP_FILESYSTEM)
+# ds_list is a list of 2 item lists
+# ds_list = [
+# ( class, ( depends-that-this-class-needs ) )
+# ]
+# It returns a list of 'class' that matched these deps exactly
+# It mainly is a helper function for DataSourceCollections
+def list_from_depends(depends, ds_list):
+    ret_list = []
+    depset = set(depends)
+    for (cls, deps) in ds_list:
+        if depset == set(deps):  # exact dependency-set match, not a subset
+            ret_list.append(cls)
+    return ret_list
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
new file mode 100644
index 00000000..e0a2f0ca
--- /dev/null
+++ b/cloudinit/ssh_util.py
@@ -0,0 +1,314 @@
+#!/usr/bin/python
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Hafliger <juerg.haefliger@hp.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from StringIO import StringIO
+
+import csv
+import os
+import pwd
+
+from cloudinit import log as logging
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+# See: man sshd_config
+DEF_SSHD_CFG = "/etc/ssh/sshd_config"
+
+
+class AuthKeyLine(object):
+ def __init__(self, source, keytype=None, base64=None,
+ comment=None, options=None):
+ self.base64 = base64
+ self.comment = comment
+ self.options = options
+ self.keytype = keytype
+ self.source = source
+
+ def empty(self):
+ if (not self.base64 and
+ not self.comment and not self.keytype and not self.options):
+ return True
+ return False
+
+ def __str__(self):
+ toks = []
+ if self.options:
+ toks.append(self.options)
+ if self.keytype:
+ toks.append(self.keytype)
+ if self.base64:
+ toks.append(self.base64)
+ if self.comment:
+ toks.append(self.comment)
+ if not toks:
+ return self.source
+ else:
+ return ' '.join(toks)
+
+
+class AuthKeyLineParser(object):
+    """Parser for lines of an ssh AUTHORIZED_KEYS file.
+
+    See: man sshd (AUTHORIZED_KEYS FILE FORMAT).  Each line of the file
+    contains one key: optional options, the keytype, the base64-encoded
+    key and an optional comment.  Empty lines and lines starting with
+    '#' are comments.
+
+    The options (if present) consist of comma-separated option
+    specifications.  No spaces are permitted, except within double
+    quotes.  Note that option keywords are case-insensitive.
+    """
+
+    def _extract_options(self, ent):
+        """Split 'ent' into (options-list, remaining-tokens).
+
+        The options (if present) consist of comma-separated option
+        specifications.  No spaces are permitted, except within double
+        quotes.  Note that option keywords are case-insensitive.
+        """
+        quoted = False
+        i = 0
+        # Scan forward to the first whitespace that is not inside a
+        # double-quoted section; that index ends the options field.
+        while (i < len(ent) and
+               ((quoted) or (ent[i] not in (" ", "\t")))):
+            curc = ent[i]
+            if i + 1 >= len(ent):
+                i = i + 1
+                break
+            nextc = ent[i + 1]
+            if curc == "\\" and nextc == '"':
+                # Escaped quote inside options; skip the pair.
+                i = i + 1
+            elif curc == '"':
+                quoted = not quoted
+            i = i + 1
+
+        options = ent[0:i]
+        options_lst = []
+
+        # Now use a csv parser to pull the options
+        # out of the above string that we just found an endpoint for.
+        #
+        # No quoting so we don't mess up any of the quoting that
+        # is already there.
+        reader = csv.reader(StringIO(options), quoting=csv.QUOTE_NONE)
+        for row in reader:
+            for e in row:
+                # Only keep non-empty csv options
+                e = e.strip()
+                if e:
+                    options_lst.append(e)
+
+        # Now take the rest of the items before the string
+        # as long as there is room to do this...
+        toks = []
+        if i + 1 < len(ent):
+            rest = ent[i + 1:]
+            toks = rest.split(None, 2)
+        return (options_lst, toks)
+
+    def _form_components(self, src_line, toks, options=None):
+        # Build an AuthKeyLine from up to three parsed tokens:
+        # 1 token:  base64 only; 2 tokens: base64 + comment;
+        # 3 tokens: keytype + base64 + comment.
+        components = {}
+        if len(toks) == 1:
+            components['base64'] = toks[0]
+        elif len(toks) == 2:
+            components['base64'] = toks[0]
+            components['comment'] = toks[1]
+        elif len(toks) == 3:
+            components['keytype'] = toks[0]
+            components['base64'] = toks[1]
+            components['comment'] = toks[2]
+        # NOTE(review): 'options' is always assigned, so 'components' is
+        # never empty and the first branch below never fires — confirm
+        # whether the fallback was meant for the len(toks) == 0 case.
+        components['options'] = options
+        if not components:
+            return AuthKeyLine(src_line)
+        else:
+            return AuthKeyLine(src_line, **components)
+
+    def parse(self, src_line, def_opt=None):
+        """Parse one authorized_keys line into an AuthKeyLine.
+
+        'def_opt' is used as the options field when the line itself
+        carries no options.
+        """
+        line = src_line.rstrip("\r\n")
+        if line.startswith("#") or line.strip() == '':
+            # Comment or blank line; keep it verbatim.
+            return AuthKeyLine(src_line)
+        else:
+            ent = line.strip()
+            toks = ent.split(None, 3)
+            if len(toks) < 4:
+                # Fewer than 4 fields means no options field is present.
+                return self._form_components(src_line, toks, def_opt)
+            else:
+                (options, toks) = self._extract_options(ent)
+                if options:
+                    options = ",".join(options)
+                else:
+                    options = def_opt
+                return self._form_components(src_line, toks, options)
+
+
+def parse_authorized_keys(fname):
+ lines = []
+ try:
+ if os.path.isfile(fname):
+ lines = util.load_file(fname).splitlines()
+ except (IOError, OSError):
+ util.logexc(LOG, "Error reading lines from %s", fname)
+ lines = []
+
+ parser = AuthKeyLineParser()
+ contents = []
+ for line in lines:
+ contents.append(parser.parse(line))
+ return contents
+
+
+def update_authorized_keys(fname, keys):
+    # Merge 'keys' (AuthKeyLine entries) into the authorized_keys file
+    # 'fname': entries sharing a base64 body are replaced in place, the
+    # rest are appended.  Returns the new file content as a single
+    # newline-terminated string (the file itself is not written here).
+    entries = parse_authorized_keys(fname)
+    to_add = list(keys)
+
+    for i in range(0, len(entries)):
+        ent = entries[i]
+        if ent.empty() or not ent.base64:
+            continue
+        # Replace those with the same base64
+        for k in keys:
+            if k.empty() or not k.base64:
+                continue
+            if k.base64 == ent.base64:
+                # Replace it with our better one
+                ent = k
+                # Don't add it later
+                if k in to_add:
+                    to_add.remove(k)
+        entries[i] = ent
+
+    # Now append any entries we did not match above
+    for key in to_add:
+        entries.append(key)
+
+    # Now format them back to strings...
+    lines = [str(b) for b in entries]
+
+    # Ensure it ends with a newline
+    lines.append('')
+    return '\n'.join(lines)
+
+
+def setup_user_keys(keys, user, key_prefix, paths):
+    """Install 'keys' into 'user's authorized_keys file.
+
+    'key_prefix', when given, becomes the default options field for each
+    key.  The target file comes from sshd_config's AuthorizedKeysFile
+    setting (with %h/%u/%% token substitution), falling back to
+    ~user/.ssh/authorized_keys on any read failure.
+    """
+    # Make sure the users .ssh dir is setup accordingly
+    pwent = pwd.getpwnam(user)
+    ssh_dir = os.path.join(pwent.pw_dir, '.ssh')
+    ssh_dir = paths.join(False, ssh_dir)
+    if not os.path.exists(ssh_dir):
+        util.ensure_dir(ssh_dir, mode=0700)
+        util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
+
+    # Turn the keys given into actual entries
+    parser = AuthKeyLineParser()
+    key_entries = []
+    for k in keys:
+        key_entries.append(parser.parse(str(k), def_opt=key_prefix))
+
+    sshd_conf_fn = paths.join(True, DEF_SSHD_CFG)
+    with util.SeLinuxGuard(ssh_dir, recursive=True):
+        try:
+            # AuthorizedKeysFile may contain tokens
+            # of the form %T which are substituted during connection set-up.
+            # The following tokens are defined: %% is replaced by a literal
+            # '%', %h is replaced by the home directory of the user being
+            # authenticated and %u is replaced by the username of that user.
+            ssh_cfg = parse_ssh_config_map(sshd_conf_fn)
+            akeys = ssh_cfg.get("authorizedkeysfile", '')
+            akeys = akeys.strip()
+            if not akeys:
+                akeys = "%h/.ssh/authorized_keys"
+            akeys = akeys.replace("%h", pwent.pw_dir)
+            akeys = akeys.replace("%u", user)
+            akeys = akeys.replace("%%", '%')
+            if not akeys.startswith('/'):
+                # A relative path is taken relative to the home dir.
+                akeys = os.path.join(pwent.pw_dir, akeys)
+            authorized_keys = paths.join(False, akeys)
+        except (IOError, OSError):
+            # Fall back to the traditional location when sshd_config
+            # can not be read or parsed.
+            authorized_keys = os.path.join(ssh_dir, 'authorized_keys')
+            util.logexc(LOG, ("Failed extracting 'AuthorizedKeysFile'"
+                              " in ssh config"
+                              " from %s, using 'AuthorizedKeysFile' file"
+                              " %s instead"),
+                        sshd_conf_fn, authorized_keys)
+
+        content = update_authorized_keys(authorized_keys, key_entries)
+        util.ensure_dir(os.path.dirname(authorized_keys), mode=0700)
+        util.write_file(authorized_keys, content, mode=0600)
+        util.chownbyid(authorized_keys, pwent.pw_uid, pwent.pw_gid)
+
+
+class SshdConfigLine(object):
+ def __init__(self, line, k=None, v=None):
+ self.line = line
+ self._key = k
+ self.value = v
+
+ @property
+ def key(self):
+ if self._key is None:
+ return None
+ # Keywords are case-insensitive
+ return self._key.lower()
+
+ def __str__(self):
+ if self._key is None:
+ return str(self.line)
+ else:
+ v = str(self._key)
+ if self.value:
+ v += " " + str(self.value)
+ return v
+
+
+def parse_ssh_config(fname):
+ # See: man sshd_config
+ # The file contains keyword-argument pairs, one per line.
+ # Lines starting with '#' and empty lines are interpreted as comments.
+ # Note: key-words are case-insensitive and arguments are case-sensitive
+ lines = []
+ if not os.path.isfile(fname):
+ return lines
+ for line in util.load_file(fname).splitlines():
+ line = line.strip()
+ if not line or line.startswith("#"):
+ lines.append(SshdConfigLine(line))
+ continue
+ (key, val) = line.split(None, 1)
+ lines.append(SshdConfigLine(line, key, val))
+ return lines
+
+
+def parse_ssh_config_map(fname):
+ lines = parse_ssh_config(fname)
+ if not lines:
+ return {}
+ ret = {}
+ for line in lines:
+ if not line.key:
+ continue
+ ret[line.key] = line.value
+ return ret
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
new file mode 100644
index 00000000..8fd6aa5d
--- /dev/null
+++ b/cloudinit/stages.py
@@ -0,0 +1,551 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import cPickle as pickle
+
+import copy
+import os
+import sys
+
+from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG)
+
+from cloudinit import handlers
+
+# Default handlers (used if not overridden)
+from cloudinit.handlers import boot_hook as bh_part
+from cloudinit.handlers import cloud_config as cc_part
+from cloudinit.handlers import shell_script as ss_part
+from cloudinit.handlers import upstart_job as up_part
+
+from cloudinit import cloud
+from cloudinit import config
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import importer
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+
+class Init(object):
+    """Drives the 'init' stage of boot.
+
+    Responsibilities visible here: reading and merging configuration,
+    laying out the cloud directory tree, discovering and pickling the
+    datasource, reflecting the current instance via a symlink, and
+    consuming user-data through registered part handlers.
+    """
+
+    def __init__(self, ds_deps=None):
+        # Dependencies the chosen datasource must satisfy; by default a
+        # source needing both the filesystem and the network is allowed.
+        if ds_deps is not None:
+            self.ds_deps = ds_deps
+        else:
+            self.ds_deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
+        # Created on first use
+        self._cfg = None
+        self._paths = None
+        self._distro = None
+        # Created only when a fetch occurs
+        self.datasource = None
+
+    @property
+    def distro(self):
+        # Lazily construct the distro object named in the 'system'
+        # config section (defaulting to 'ubuntu').
+        if not self._distro:
+            # Try to find the right class to use
+            scfg = self._extract_cfg('system')
+            name = scfg.pop('distro', 'ubuntu')
+            cls = distros.fetch(name)
+            LOG.debug("Using distro class %s", cls)
+            self._distro = cls(name, scfg, self.paths)
+        return self._distro
+
+    @property
+    def cfg(self):
+        # The merged config with the 'system_info' section removed.
+        return self._extract_cfg('restricted')
+
+    def _extract_cfg(self, restriction):
+        # Return a deep copy of the merged config, limited by
+        # 'restriction' ('restricted', 'system' or 'paths').
+        # Ensure actually read
+        self.read_cfg()
+        # Nobody gets the real config
+        ocfg = copy.deepcopy(self._cfg)
+        if restriction == 'restricted':
+            ocfg.pop('system_info', None)
+        elif restriction == 'system':
+            ocfg = util.get_cfg_by_path(ocfg, ('system_info',), {})
+        elif restriction == 'paths':
+            ocfg = util.get_cfg_by_path(ocfg, ('system_info', 'paths'), {})
+        if not isinstance(ocfg, (dict)):
+            ocfg = {}
+        return ocfg
+
+    @property
+    def paths(self):
+        # Lazily construct the Paths helper from the 'paths' config.
+        if not self._paths:
+            path_info = self._extract_cfg('paths')
+            self._paths = helpers.Paths(path_info, self.datasource)
+        return self._paths
+
+    def _initial_subdirs(self):
+        # Directories that must exist under the cloud dir before any
+        # other stage runs.
+        c_dir = self.paths.cloud_dir
+        initial_dirs = [
+            c_dir,
+            os.path.join(c_dir, 'scripts'),
+            os.path.join(c_dir, 'scripts', 'per-instance'),
+            os.path.join(c_dir, 'scripts', 'per-once'),
+            os.path.join(c_dir, 'scripts', 'per-boot'),
+            os.path.join(c_dir, 'seed'),
+            os.path.join(c_dir, 'instances'),
+            os.path.join(c_dir, 'handlers'),
+            os.path.join(c_dir, 'sem'),
+            os.path.join(c_dir, 'data'),
+        ]
+        return initial_dirs
+
+    def purge_cache(self, rm_instance_lnk=True):
+        # Remove cached boot state (and, optionally, the instance
+        # symlink); returns how many paths were attempted.
+        rm_list = []
+        rm_list.append(self.paths.boot_finished)
+        if rm_instance_lnk:
+            rm_list.append(self.paths.instance_link)
+        for f in rm_list:
+            util.del_file(f)
+        return len(rm_list)
+
+    def initialize(self):
+        # Public entry point for first-time setup.
+        self._initialize_filesystem()
+
+    def _initialize_filesystem(self):
+        # Create the cloud dir tree and (optionally) the log file with
+        # the ownership requested by 'syslog_fix_perms'.
+        util.ensure_dirs(self._initial_subdirs())
+        log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
+        perms = util.get_cfg_option_str(self.cfg, 'syslog_fix_perms')
+        if log_file:
+            util.ensure_file(log_file)
+            if perms:
+                (u, g) = perms.split(':', 1)
+                # "-1"/"None" mean "leave that owner/group unchanged".
+                if u == "-1" or u == "None":
+                    u = None
+                if g == "-1" or g == "None":
+                    g = None
+                util.chownbyname(log_file, u, g)
+
+    def read_cfg(self, extra_fns=None):
+        # Load (once) the merged config; later calls are no-ops.
+        # None check so that we don't keep on re-loading if empty
+        if self._cfg is None:
+            self._cfg = self._read_cfg(extra_fns)
+            # LOG.debug("Loaded 'init' config %s", self._cfg)
+
+    def _read_base_cfg(self):
+        # Merge, in priority order: kernel cmdline config, then
+        # cloud.cfg (+ conf.d), then the built-in defaults.
+        base_cfgs = []
+        default_cfg = util.get_builtin_cfg()
+        kern_contents = util.read_cc_from_cmdline()
+        # Kernel/cmdline parameters override system config
+        if kern_contents:
+            base_cfgs.append(util.load_yaml(kern_contents, default={}))
+        # Anything in your conf.d location??
+        # or the 'default' cloud.cfg location???
+        base_cfgs.append(util.read_conf_with_confd(CLOUD_CONFIG))
+        # And finally the default gets to play
+        if default_cfg:
+            base_cfgs.append(default_cfg)
+        return util.mergemanydict(base_cfgs)
+
+    def _read_cfg(self, extra_fns):
+        # Merge the base config with datasource-provided and
+        # explicitly-listed extra config files.
+        no_cfg_paths = helpers.Paths({}, self.datasource)
+        merger = helpers.ConfigMerger(paths=no_cfg_paths,
+                                      datasource=self.datasource,
+                                      additional_fns=extra_fns,
+                                      base_cfg=self._read_base_cfg())
+        return merger.cfg
+
+    def _restore_from_cache(self):
+        # We try to restore from a current link and static path
+        # by using the instance link, if purge_cache was called
+        # the file wont exist.
+        pickled_fn = self.paths.get_ipath_cur('obj_pkl')
+        pickle_contents = None
+        try:
+            pickle_contents = util.load_file(pickled_fn)
+        except Exception:
+            pass
+        # This is expected so just return nothing
+        # successfully loaded...
+        if not pickle_contents:
+            return None
+        try:
+            return pickle.loads(pickle_contents)
+        except Exception:
+            util.logexc(LOG, "Failed loading pickled blob from %s", pickled_fn)
+            return None
+
+    def _write_to_cache(self):
+        # Pickle the datasource to the instance dir so later boots can
+        # restore it; returns True on success, False on any failure.
+        if not self.datasource:
+            return False
+        pickled_fn = self.paths.get_ipath_cur("obj_pkl")
+        try:
+            pk_contents = pickle.dumps(self.datasource)
+        except Exception:
+            util.logexc(LOG, "Failed pickling datasource %s", self.datasource)
+            return False
+        try:
+            # Read-only: the pickle is boot state, not user-editable.
+            util.write_file(pickled_fn, pk_contents, mode=0400)
+        except Exception:
+            util.logexc(LOG, "Failed pickling datasource to %s", pickled_fn)
+            return False
+        return True
+
+    def _get_datasources(self):
+        # Returns (datasource name list, package list to search).
+        # Any config provided???
+        pkg_list = self.cfg.get('datasource_pkg_list') or []
+        # Add the defaults at the end
+        for n in ['', util.obj_name(sources)]:
+            if n not in pkg_list:
+                pkg_list.append(n)
+        cfg_list = self.cfg.get('datasource_list') or []
+        return (cfg_list, pkg_list)
+
+    def _get_data_source(self):
+        # Return the cached, restored or freshly-discovered datasource.
+        if self.datasource:
+            return self.datasource
+        ds = self._restore_from_cache()
+        if ds:
+            LOG.debug("Restored from cache, datasource: %s", ds)
+        if not ds:
+            (cfg_list, pkg_list) = self._get_datasources()
+            # Deep copy so that user-data handlers can not modify
+            # (which will affect user-data handlers down the line...)
+            (ds, dsname) = sources.find_source(self.cfg,
+                                               self.distro,
+                                               self.paths,
+                                               copy.deepcopy(self.ds_deps),
+                                               cfg_list,
+                                               pkg_list)
+            LOG.debug("Loaded datasource %s - %s", dsname, ds)
+        self.datasource = ds
+        # Ensure we adjust our path members datasource
+        # now that we have one (thus allowing ipath to be used)
+        self.paths.datasource = ds
+        return ds
+
+    def _get_instance_subdirs(self):
+        return ['handlers', 'scripts', 'sems']
+
+    def _get_ipath(self, subname=None):
+        # Force a check to see if anything
+        # actually comes back, if not
+        # then a datasource has not been assigned...
+        instance_dir = self.paths.get_ipath(subname)
+        if not instance_dir:
+            raise RuntimeError(("No instance directory is available."
+                                " Has a datasource been fetched??"))
+        return instance_dir
+
+    def _reflect_cur_instance(self):
+        # Remove the old symlink and attach a new one so
+        # that further reads/writes connect into the right location
+        idir = self._get_ipath()
+        util.del_file(self.paths.instance_link)
+        util.sym_link(idir, self.paths.instance_link)
+
+        # Ensures these dirs exist
+        dir_list = []
+        for d in self._get_instance_subdirs():
+            dir_list.append(os.path.join(idir, d))
+        util.ensure_dirs(dir_list)
+
+        # Write out information on what is being used for the current instance
+        # and what may have been used for a previous instance...
+        dp = self.paths.get_cpath('data')
+
+        # Write what the datasource was and is..
+        ds = "%s: %s" % (util.obj_name(self.datasource), self.datasource)
+        previous_ds = None
+        ds_fn = os.path.join(idir, 'datasource')
+        try:
+            previous_ds = util.load_file(ds_fn).strip()
+        except Exception:
+            pass
+        if not previous_ds:
+            previous_ds = ds
+        util.write_file(ds_fn, "%s\n" % ds)
+        util.write_file(os.path.join(dp, 'previous-datasource'),
+                        "%s\n" % (previous_ds))
+
+        # What the instance id was and is...
+        iid = self.datasource.get_instance_id()
+        previous_iid = None
+        iid_fn = os.path.join(dp, 'instance-id')
+        try:
+            previous_iid = util.load_file(iid_fn).strip()
+        except Exception:
+            pass
+        if not previous_iid:
+            previous_iid = iid
+        util.write_file(iid_fn, "%s\n" % iid)
+        util.write_file(os.path.join(dp, 'previous-instance-id'),
+                        "%s\n" % (previous_iid))
+        return iid
+
+    def fetch(self):
+        # Locate (or restore) the datasource for this boot.
+        return self._get_data_source()
+
+    def instancify(self):
+        # Point the instance symlink/dirs at the fetched datasource.
+        return self._reflect_cur_instance()
+
+    def cloudify(self):
+        # Form the needed options to cloudify our members
+        return cloud.Cloud(self.datasource,
+                           self.paths, self.cfg,
+                           self.distro, helpers.Runners(self.paths))
+
+    def update(self):
+        # Persist the datasource pickle; only store user-data when the
+        # pickle write succeeded.
+        if not self._write_to_cache():
+            return
+        self._store_userdata()
+
+    def _store_userdata(self):
+        # Write both the raw and the processed user-data, readable by
+        # root only (it may contain secrets).
+        raw_ud = "%s" % (self.datasource.get_userdata_raw())
+        util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0600)
+        processed_ud = "%s" % (self.datasource.get_userdata())
+        util.write_file(self._get_ipath('userdata'), processed_ud, 0600)
+
+    def _default_userdata_handlers(self):
+        opts = {
+            'paths': self.paths,
+            'datasource': self.datasource,
+        }
+        # TODO Hmmm, should we dynamically import these??
+        def_handlers = [
+            cc_part.CloudConfigPartHandler(**opts),
+            ss_part.ShellScriptPartHandler(**opts),
+            bh_part.BootHookPartHandler(**opts),
+            up_part.UpstartJobPartHandler(**opts),
+        ]
+        return def_handlers
+
+    def consume_userdata(self, frequency=PER_INSTANCE):
+        """Walk the fetched user-data, dispatching each MIME part to a
+        registered (custom or default) part handler."""
+        cdir = self.paths.get_cpath("handlers")
+        idir = self._get_ipath("handlers")
+
+        # Add the path to the plugins dir to the top of our list for import
+        # instance dir should be read before cloud-dir
+        if cdir and cdir not in sys.path:
+            sys.path.insert(0, cdir)
+        if idir and idir not in sys.path:
+            sys.path.insert(0, idir)
+
+        # Ensure datasource fetched before activation (just incase)
+        user_data_msg = self.datasource.get_userdata()
+
+        # This keeps track of all the active handlers
+        c_handlers = helpers.ContentHandlers()
+
+        # Add handlers in cdir
+        potential_handlers = util.find_modules(cdir)
+        for (fname, mod_name) in potential_handlers.iteritems():
+            try:
+                mod_locs = importer.find_module(mod_name, [''],
+                                                ['list_types',
+                                                 'handle_part'])
+                if not mod_locs:
+                    LOG.warn(("Could not find a valid user-data handler"
+                              " named %s in file %s"), mod_name, fname)
+                    continue
+                mod = importer.import_module(mod_locs[0])
+                mod = handlers.fixup_handler(mod)
+                types = c_handlers.register(mod)
+                LOG.debug("Added handler for %s from %s", types, fname)
+            except:
+                # TODO(review): bare except also swallows SystemExit and
+                # KeyboardInterrupt; consider 'except Exception'.
+                util.logexc(LOG, "Failed to register handler from %s", fname)
+
+        def_handlers = self._default_userdata_handlers()
+        applied_def_handlers = c_handlers.register_defaults(def_handlers)
+        if applied_def_handlers:
+            LOG.debug("Registered default handlers: %s", applied_def_handlers)
+
+        # Form our cloud interface
+        data = self.cloudify()
+
+        # Init the handlers first
+        called = []
+        for (_ctype, mod) in c_handlers.iteritems():
+            if mod in called:
+                continue
+            handlers.call_begin(mod, data, frequency)
+            called.append(mod)
+
+        # Walk the user data
+        part_data = {
+            'handlers': c_handlers,
+            # Any new handlers that are encountered get writen here
+            'handlerdir': idir,
+            'data': data,
+            # The default frequency if handlers don't have one
+            'frequency': frequency,
+            # This will be used when new handlers are found
+            # to help write there contents to files with numbered
+            # names...
+            'handlercount': 0,
+        }
+        handlers.walk(user_data_msg, handlers.walker_callback, data=part_data)
+
+        # Give callbacks opportunity to finalize
+        called = []
+        for (_ctype, mod) in c_handlers.iteritems():
+            if mod in called:
+                continue
+            handlers.call_end(mod, data, frequency)
+            called.append(mod)
+
+
+class Modules(object):
+    """Reads config-module declarations from config sections and runs
+    them (with per-module frequency semaphores) via the Init's cloud.
+    """
+
+    def __init__(self, init, cfg_files=None):
+        self.init = init
+        self.cfg_files = cfg_files
+        # Created on first use
+        self._cached_cfg = None
+
+    @property
+    def cfg(self):
+        # None check to avoid empty case causing re-reading
+        if self._cached_cfg is None:
+            merger = helpers.ConfigMerger(paths=self.init.paths,
+                                          datasource=self.init.datasource,
+                                          additional_fns=self.cfg_files,
+                                          base_cfg=self.init.cfg)
+            self._cached_cfg = merger.cfg
+            # LOG.debug("Loading 'module' config %s", self._cached_cfg)
+        # Only give out a copy so that others can't modify this...
+        return copy.deepcopy(self._cached_cfg)
+
+    def _read_modules(self, name):
+        # Parse the config section 'name' into a normalized module list.
+        module_list = []
+        if name not in self.cfg:
+            return module_list
+        cfg_mods = self.cfg[name]
+        # Create 'module_list', an array of hashes
+        # Where hash['mod'] = module name
+        #       hash['freq'] = frequency
+        #       hash['args'] = arguments
+        for item in cfg_mods:
+            if not item:
+                continue
+            if isinstance(item, (str, basestring)):
+                # Bare string: just the module name.
+                module_list.append({
+                    'mod': item.strip(),
+                })
+            elif isinstance(item, (list)):
+                contents = {}
+                # Meant to fall through...
+                if len(item) >= 1:
+                    contents['mod'] = item[0].strip()
+                if len(item) >= 2:
+                    contents['freq'] = item[1].strip()
+                if len(item) >= 3:
+                    contents['args'] = item[2:]
+                if contents:
+                    module_list.append(contents)
+            elif isinstance(item, (dict)):
+                contents = {}
+                valid = False
+                if 'name' in item:
+                    contents['mod'] = item['name'].strip()
+                    valid = True
+                if 'frequency' in item:
+                    contents['freq'] = item['frequency'].strip()
+                if 'args' in item:
+                    contents['args'] = item['args'] or []
+                # A dict entry without a 'name' is rejected.
+                if contents and valid:
+                    module_list.append(contents)
+            else:
+                raise TypeError(("Failed to read '%s' item in config,"
+                                 " unknown type %s") %
+                                (item, util.obj_name(item)))
+        return module_list
+
+    def _fixup_modules(self, raw_mods):
+        # Resolve each raw module spec to an imported, fixed-up module;
+        # unknown names and frequencies are logged and skipped/reset.
+        mostly_mods = []
+        for raw_mod in raw_mods:
+            raw_name = raw_mod['mod']
+            freq = raw_mod.get('freq')
+            run_args = raw_mod.get('args') or []
+            mod_name = config.form_module_name(raw_name)
+            if not mod_name:
+                continue
+            if freq and freq not in FREQUENCIES:
+                LOG.warn(("Config specified module %s"
+                          " has an unknown frequency %s"), raw_name, freq)
+                # Reset it so when ran it will get set to a known value
+                freq = None
+            mod_locs = importer.find_module(mod_name,
+                                            ['', util.obj_name(config)],
+                                            ['handle'])
+            if not mod_locs:
+                LOG.warn("Could not find module named %s", mod_name)
+                continue
+            mod = config.fixup_module(importer.import_module(mod_locs[0]))
+            mostly_mods.append([mod, raw_name, freq, run_args])
+        return mostly_mods
+
+    def _run_modules(self, mostly_mods):
+        d_name = self.init.distro.name
+        cc = self.init.cloudify()
+        # Return which ones ran
+        # and which ones failed + the exception of why it failed
+        failures = []
+        which_ran = []
+        for (mod, name, freq, args) in mostly_mods:
+            try:
+                # Try the modules frequency, otherwise fallback to a known one
+                if not freq:
+                    freq = mod.frequency
+                if not freq in FREQUENCIES:
+                    freq = PER_INSTANCE
+                worked_distros = mod.distros
+                if (worked_distros and d_name not in worked_distros):
+                    LOG.warn(("Module %s is verified on %s distros"
+                              " but not on %s distro. It may or may not work"
+                              " correctly."), name, worked_distros, d_name)
+                # Use the configs logger and not our own
+                # TODO: possibly check the module
+                # for having a LOG attr and just give it back
+                # its own logger?
+                func_args = [name, self.cfg,
+                             cc, config.LOG, args]
+                # Mark it as having started running
+                which_ran.append(name)
+                # This name will affect the semaphore name created
+                run_name = "config-%s" % (name)
+                cc.run(run_name, mod.handle, func_args, freq=freq)
+            except Exception as e:
+                util.logexc(LOG, "Running %s (%s) failed", name, mod)
+                failures.append((name, e))
+        return (which_ran, failures)
+
+    def run_single(self, mod_name, args=None, freq=None):
+        # Run a single named module through the normal pipeline.
+        # Form the users module 'specs'
+        mod_to_be = {
+            'mod': mod_name,
+            'args': args,
+            'freq': freq,
+        }
+        # Now resume doing the normal fixups and running
+        raw_mods = [mod_to_be]
+        mostly_mods = self._fixup_modules(raw_mods)
+        return self._run_modules(mostly_mods)
+
+    def run_section(self, section_name):
+        # Run every module declared in config section 'section_name'.
+        raw_mods = self._read_modules(section_name)
+        mostly_mods = self._fixup_modules(raw_mods)
+        return self._run_modules(mostly_mods)
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
new file mode 100644
index 00000000..c4259fa0
--- /dev/null
+++ b/cloudinit/templater.py
@@ -0,0 +1,41 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from tempita import Template
+
+from cloudinit import util
+
+
+def render_from_file(fn, params):
+    # Render the tempita template file 'fn' with 'params'; returns the
+    # rendered text.
+    return render_string(util.load_file(fn), params, name=fn)
+
+
+def render_to_file(fn, outfn, params, mode=0644):
+    # Render template file 'fn' with 'params' and write the result to
+    # 'outfn' with the given file mode.
+    contents = render_from_file(fn, params)
+    util.write_file(outfn, contents, mode=mode)
+
+
+def render_string(content, params, name=None):
+ tpl = Template(content, name=name)
+ if not params:
+ params = dict()
+ return tpl.substitute(params)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
new file mode 100644
index 00000000..dbf72392
--- /dev/null
+++ b/cloudinit/url_helper.py
@@ -0,0 +1,226 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from contextlib import closing
+
+import errno
+import socket
+import time
+import urllib
+import urllib2
+
+from cloudinit import log as logging
+from cloudinit import version
+
+LOG = logging.getLogger(__name__)
+
+
+class UrlResponse(object):
+ def __init__(self, status_code, contents=None, headers=None):
+ self._status_code = status_code
+ self._contents = contents
+ self._headers = headers
+
+ @property
+ def code(self):
+ return self._status_code
+
+ @property
+ def contents(self):
+ return self._contents
+
+ @property
+ def headers(self):
+ return self._headers
+
+ def __str__(self):
+ if not self.contents:
+ return ''
+ else:
+ return str(self.contents)
+
+ def ok(self, redirects_ok=False):
+ upper = 300
+ if redirects_ok:
+ upper = 400
+ if self.code >= 200 and self.code < upper:
+ return True
+ else:
+ return False
+
+
+def readurl(url, data=None, timeout=None,
+ retries=0, sec_between=1, headers=None):
+
+ req_args = {}
+ req_args['url'] = url
+ if data is not None:
+ req_args['data'] = urllib.urlencode(data)
+
+ if not headers:
+ headers = {
+ 'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
+ }
+
+ req_args['headers'] = headers
+ req = urllib2.Request(**req_args)
+
+ retries = max(retries, 0)
+ attempts = retries + 1
+
+ excepts = []
+ LOG.debug(("Attempting to open '%s' with %s attempts"
+ " (%s retries, timeout=%s) to be performed"),
+ url, attempts, retries, timeout)
+ open_args = {}
+ if timeout is not None:
+ open_args['timeout'] = int(timeout)
+ for i in range(0, attempts):
+ try:
+ with closing(urllib2.urlopen(req, **open_args)) as rh:
+ content = rh.read()
+ status = rh.getcode()
+ if status is None:
+ # This seems to happen when files are read...
+ status = 200
+ headers = {}
+ if rh.headers:
+ headers = dict(rh.headers)
+ LOG.debug("Read from %s (%s, %sb) after %s attempts",
+ url, status, len(content), (i + 1))
+ return UrlResponse(status, content, headers)
+ except urllib2.HTTPError as e:
+ excepts.append(e)
+ except urllib2.URLError as e:
+ # This can be a message string or
+ # another exception instance
+ # (socket.error for remote URLs, OSError for local URLs).
+ if (isinstance(e.reason, (OSError)) and
+ e.reason.errno == errno.ENOENT):
+ excepts.append(e.reason)
+ else:
+ excepts.append(e)
+ except Exception as e:
+ excepts.append(e)
+ if i + 1 < attempts:
+ LOG.debug("Please wait %s seconds while we wait to try again",
+ sec_between)
+ time.sleep(sec_between)
+
+ # Didn't work out
+ LOG.warn("Failed reading from %s after %s attempts", url, attempts)
+
+ # It must of errored at least once for code
+ # to get here so re-raise the last error
+ LOG.debug("%s errors occured, re-raising the last one", len(excepts))
+ raise excepts[-1]
+
+
+def wait_for_url(urls, max_wait=None, timeout=None,
+ status_cb=None, headers_cb=None, sleep_time=1):
+ """
+ urls: a list of urls to try
+ max_wait: roughly the maximum time to wait before giving up
+ The max time is *actually* len(urls)*timeout as each url will
+ be tried once and given the timeout provided.
+ timeout: the timeout provided to urllib2.urlopen
+ status_cb: call method with string message when a url is not available
+ headers_cb: call method with single argument of url to get headers
+ for request.
+
+ the idea of this routine is to wait for the EC2 metdata service to
+ come up. On both Eucalyptus and EC2 we have seen the case where
+ the instance hit the MD before the MD service was up. EC2 seems
+ to have permenantely fixed this, though.
+
+ In openstack, the metadata service might be painfully slow, and
+ unable to avoid hitting a timeout of even up to 10 seconds or more
+ (LP: #894279) for a simple GET.
+
+ Offset those needs with the need to not hang forever (and block boot)
+ on a system where cloud-init is configured to look for EC2 Metadata
+ service but is not going to find one. It is possible that the instance
+ data host (169.254.169.254) may be firewalled off Entirely for a sytem,
+ meaning that the connection will block forever unless a timeout is set.
+ """
+ start_time = time.time()
+
+ def log_status_cb(msg):
+ LOG.debug(msg)
+
+ if status_cb is None:
+ status_cb = log_status_cb
+
+ def timeup(max_wait, start_time):
+ return ((max_wait <= 0 or max_wait is None) or
+ (time.time() - start_time > max_wait))
+
+ loop_n = 0
+ while True:
+ sleep_time = int(loop_n / 5) + 1
+ for url in urls:
+ now = time.time()
+ if loop_n != 0:
+ if timeup(max_wait, start_time):
+ break
+ if timeout and (now + timeout > (start_time + max_wait)):
+ # shorten timeout to not run way over max_time
+ timeout = int((start_time + max_wait) - now)
+
+ reason = ""
+ try:
+ if headers_cb is not None:
+ headers = headers_cb(url)
+ else:
+ headers = {}
+
+ resp = readurl(url, headers=headers, timeout=timeout)
+ if not resp.contents:
+ reason = "empty response [%s]" % (resp.code)
+ elif not resp.ok():
+ reason = "bad status code [%s]" % (resp.code)
+ else:
+ return url
+ except urllib2.HTTPError as e:
+ reason = "http error [%s]" % e.code
+ except urllib2.URLError as e:
+ reason = "url error [%s]" % e.reason
+ except socket.timeout as e:
+ reason = "socket timeout [%s]" % e
+ except Exception as e:
+ reason = "unexpected error [%s]" % e
+
+ time_taken = int(time.time() - start_time)
+ status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url,
+ time_taken,
+ max_wait, reason)
+ status_cb(status_msg)
+
+ if timeup(max_wait, start_time):
+ break
+
+ loop_n = loop_n + 1
+ LOG.debug("Please wait %s seconds while we wait to try again",
+ sleep_time)
+ time.sleep(sleep_time)
+
+ return False
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
new file mode 100644
index 00000000..0842594d
--- /dev/null
+++ b/cloudinit/user_data.py
@@ -0,0 +1,243 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Canonical Ltd.
+# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Scott Moser <scott.moser@canonical.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import os
+
+import email
+from email.mime.multipart import MIMEMultipart
+from email.mime.text import MIMEText
+from email.mime.base import MIMEBase
+
+from cloudinit import handlers
+from cloudinit import log as logging
+from cloudinit import url_helper
+from cloudinit import util
+
+LOG = logging.getLogger(__name__)
+
+# Constants copied in from the handler module
+NOT_MULTIPART_TYPE = handlers.NOT_MULTIPART_TYPE
+PART_FN_TPL = handlers.PART_FN_TPL
+OCTET_TYPE = handlers.OCTET_TYPE
+
+# Saves typing errors
+CONTENT_TYPE = 'Content-Type'
+
+# Various special content types that cause special actions
+TYPE_NEEDED = ["text/plain", "text/x-not-multipart"]
+INCLUDE_TYPES = ['text/x-include-url', 'text/x-include-once-url']
+ARCHIVE_TYPES = ["text/cloud-config-archive"]
+UNDEF_TYPE = "text/plain"
+ARCHIVE_UNDEF_TYPE = "text/cloud-config"
+
+# Msg header used to track attachments
+ATTACHMENT_FIELD = 'Number-Attachments'
+
+
+class UserDataProcessor(object):
+ def __init__(self, paths):
+ self.paths = paths
+
+ def process(self, blob):
+ base_msg = convert_string(blob)
+ process_msg = MIMEMultipart()
+ self._process_msg(base_msg, process_msg)
+ return process_msg
+
+ def _process_msg(self, base_msg, append_msg):
+ for part in base_msg.walk():
+ # multipart/* are just containers
+ if part.get_content_maintype() == 'multipart':
+ continue
+
+ ctype = None
+ ctype_orig = part.get_content_type()
+ payload = part.get_payload(decode=True)
+
+ if not ctype_orig:
+ ctype_orig = UNDEF_TYPE
+
+ if ctype_orig in TYPE_NEEDED:
+ ctype = handlers.type_from_starts_with(payload)
+
+ if ctype is None:
+ ctype = ctype_orig
+
+ if ctype in INCLUDE_TYPES:
+ self._do_include(payload, append_msg)
+ continue
+
+ if ctype in ARCHIVE_TYPES:
+ self._explode_archive(payload, append_msg)
+ continue
+
+ if CONTENT_TYPE in base_msg:
+ base_msg.replace_header(CONTENT_TYPE, ctype)
+ else:
+ base_msg[CONTENT_TYPE] = ctype
+
+ self._attach_part(append_msg, part)
+
+ def _get_include_once_filename(self, entry):
+ entry_fn = util.hash_blob(entry, 'md5', 64)
+ return os.path.join(self.paths.get_ipath_cur('data'),
+ 'urlcache', entry_fn)
+
+ def _do_include(self, content, append_msg):
+ # Include a list of urls, one per line
+ # also support '#include <url here>'
+ # or #include-once '<url here>'
+ include_once_on = False
+ for line in content.splitlines():
+ lc_line = line.lower()
+ if lc_line.startswith("#include-once"):
+ line = line[len("#include-once"):].lstrip()
+ # Every following include will now
+ # not be refetched.... but will be
+ # re-read from a local urlcache (if it worked)
+ include_once_on = True
+ elif lc_line.startswith("#include"):
+ line = line[len("#include"):].lstrip()
+ # Disable the include once if it was on
+ # if it wasn't, then this has no effect.
+ include_once_on = False
+ if line.startswith("#"):
+ continue
+ include_url = line.strip()
+ if not include_url:
+ continue
+
+ include_once_fn = None
+ content = None
+ if include_once_on:
+ include_once_fn = self._get_include_once_filename(include_url)
+ if include_once_on and os.path.isfile(include_once_fn):
+ content = util.load_file(include_once_fn)
+ else:
+ resp = url_helper.readurl(include_url)
+ if include_once_on and resp.ok():
+ util.write_file(include_once_fn, str(resp), mode=0600)
+ if resp.ok():
+ content = str(resp)
+ else:
+ LOG.warn(("Fetching from %s resulted in"
+ " a invalid http code of %s"),
+ include_url, resp.code)
+
+ if content is not None:
+ new_msg = convert_string(content)
+ self._process_msg(new_msg, append_msg)
+
+ def _explode_archive(self, archive, append_msg):
+ entries = util.load_yaml(archive, default=[], allowed=[list, set])
+ for ent in entries:
+ # ent can be one of:
+ # dict { 'filename' : 'value', 'content' :
+ # 'value', 'type' : 'value' }
+ # filename and type may not be present
+ # or
+ # scalar(payload)
+ if isinstance(ent, (str, basestring)):
+ ent = {'content': ent}
+ if not isinstance(ent, (dict)):
+ # TODO raise?
+ continue
+
+ content = ent.get('content', '')
+ mtype = ent.get('type')
+ if not mtype:
+ mtype = handlers.type_from_starts_with(content,
+ ARCHIVE_UNDEF_TYPE)
+
+ maintype, subtype = mtype.split('/', 1)
+ if maintype == "text":
+ msg = MIMEText(content, _subtype=subtype)
+ else:
+ msg = MIMEBase(maintype, subtype)
+ msg.set_payload(content)
+
+ if 'filename' in ent:
+ msg.add_header('Content-Disposition',
+ 'attachment', filename=ent['filename'])
+
+ for header in list(ent.keys()):
+ if header in ('content', 'filename', 'type'):
+ continue
+ msg.add_header(header, ent['header'])
+
+ self._attach_part(append_msg, msg)
+
+ def _multi_part_count(self, outer_msg, new_count=None):
+ """
+ Return the number of attachments to this MIMEMultipart by looking
+ at its 'Number-Attachments' header.
+ """
+ if ATTACHMENT_FIELD not in outer_msg:
+ outer_msg[ATTACHMENT_FIELD] = '0'
+
+ if new_count is not None:
+ outer_msg.replace_header(ATTACHMENT_FIELD, str(new_count))
+
+ fetched_count = 0
+ try:
+ fetched_count = int(outer_msg.get(ATTACHMENT_FIELD))
+ except (ValueError, TypeError):
+ outer_msg.replace_header(ATTACHMENT_FIELD, str(fetched_count))
+ return fetched_count
+
+ def _part_filename(self, _unnamed_part, count):
+ return PART_FN_TPL % (count + 1)
+
+ def _attach_part(self, outer_msg, part):
+ """
+ Attach a part to an outer message. outer_msg must be a MIMEMultipart.
+ Modifies a header in the message to keep track of number of attachments.
+ """
+ cur_c = self._multi_part_count(outer_msg)
+ if not part.get_filename():
+ fn = self._part_filename(part, cur_c)
+ part.add_header('Content-Disposition',
+ 'attachment', filename=fn)
+ outer_msg.attach(part)
+ self._multi_part_count(outer_msg, cur_c + 1)
+
+
+# Converts a raw string into a mime message
+def convert_string(raw_data, headers=None):
+ if not raw_data:
+ raw_data = ''
+ if not headers:
+ headers = {}
+ data = util.decomp_str(raw_data)
+ if "mime-version:" in data[0:4096].lower():
+ msg = email.message_from_string(data)
+ for (key, val) in headers.iteritems():
+ if key in msg:
+ msg.replace_header(key, val)
+ else:
+ msg[key] = val
+ else:
+ mtype = headers.get(CONTENT_TYPE, NOT_MULTIPART_TYPE)
+ maintype, subtype = mtype.split("/", 1)
+ msg = MIMEBase(maintype, subtype, *headers)
+ msg.set_payload(data)
+ return msg
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 84aae3ea..d7dd20b5 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1,10 +1,12 @@
# vi: ts=4 expandtab
#
-# Copyright (C) 2009-2010 Canonical Ltd.
+# Copyright (C) 2012 Canonical Ltd.
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
+# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Scott Moser <scott.moser@canonical.com>
-# Author: Juerg Hafliger <juerg.haefliger@hp.com>
+# Author: Juerg Haefliger <juerg.haefliger@hp.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -18,87 +20,314 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import yaml
+from StringIO import StringIO
+
+import copy as obj_copy
+import contextlib
+import errno
+import glob
+import grp
+import gzip
+import hashlib
import os
-import os.path
+import platform
+import pwd
+import random
import shutil
-import errno
-import subprocess
-from Cheetah.Template import Template
-import urllib2
-import urllib
-import logging
-import re
import socket
+import stat
+import string # pylint: disable=W0402
+import subprocess
import sys
-import time
import tempfile
-import traceback
+import time
+import types
import urlparse
-try:
- import selinux
- HAVE_LIBSELINUX = True
-except ImportError:
- HAVE_LIBSELINUX = False
+import yaml
+
+from cloudinit import importer
+from cloudinit import log as logging
+from cloudinit import url_helper as uhelp
+
+from cloudinit.settings import (CFG_BUILTIN)
+
+
+LOG = logging.getLogger(__name__)
+
+# Helps cleanup filenames to ensure they aren't FS incompatible
+FN_REPLACEMENTS = {
+ os.sep: '_',
+}
+FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
+
+# Helper utils to see if running in a container
+CONTAINER_TESTS = ['running-in-container', 'lxc-is-container']
+
+
+class ProcessExecutionError(IOError):
+
+ MESSAGE_TMPL = ('%(description)s\n'
+ 'Command: %(cmd)s\n'
+ 'Exit code: %(exit_code)s\n'
+ 'Reason: %(reason)s\n'
+ 'Stdout: %(stdout)r\n'
+ 'Stderr: %(stderr)r')
+
+ def __init__(self, stdout=None, stderr=None,
+ exit_code=None, cmd=None,
+ description=None, reason=None):
+ if not cmd:
+ self.cmd = '-'
+ else:
+ self.cmd = cmd
+
+ if not description:
+ self.description = 'Unexpected error while running command.'
+ else:
+ self.description = description
+
+ if not isinstance(exit_code, (long, int)):
+ self.exit_code = '-'
+ else:
+ self.exit_code = exit_code
+
+ if not stderr:
+ self.stderr = ''
+ else:
+ self.stderr = stderr
+
+ if not stdout:
+ self.stdout = ''
+ else:
+ self.stdout = stdout
+
+ if reason:
+ self.reason = reason
+ else:
+ self.reason = '-'
+
+ message = self.MESSAGE_TMPL % {
+ 'description': self.description,
+ 'cmd': self.cmd,
+ 'exit_code': self.exit_code,
+ 'stdout': self.stdout,
+ 'stderr': self.stderr,
+ 'reason': self.reason,
+ }
+ IOError.__init__(self, message)
+
+
+class SeLinuxGuard(object):
+ def __init__(self, path, recursive=False):
+ # Late import since it might not always
+ # be possible to use this
+ try:
+ self.selinux = importer.import_module('selinux')
+ except ImportError:
+ self.selinux = None
+ self.path = path
+ self.recursive = recursive
+
+ def __enter__(self):
+ if self.selinux:
+ return True
+ else:
+ return False
+
+ def __exit__(self, excp_type, excp_value, excp_traceback):
+ if self.selinux:
+ path = os.path.realpath(os.path.expanduser(self.path))
+ do_restore = False
+ try:
+ # See if even worth restoring??
+ stats = os.lstat(path)
+ if stat.ST_MODE in stats:
+ self.selinux.matchpathcon(path, stats[stat.ST_MODE])
+ do_restore = True
+ except OSError:
+ pass
+ if do_restore:
+ LOG.debug("Restoring selinux mode for %s (recursive=%s)",
+ path, self.recursive)
+ self.selinux.restorecon(path, recursive=self.recursive)
+
+
+class MountFailedError(Exception):
+ pass
+
+
+def ExtendedTemporaryFile(**kwargs):
+ fh = tempfile.NamedTemporaryFile(**kwargs)
+ # Replace its unlink with a quiet version
+ # that does not raise errors when the
+ # file to unlink has been unlinked elsewhere..
+ LOG.debug("Created temporary file %s", fh.name)
+ fh.unlink = del_file
+
+ # Add a new method that will unlink
+ # right 'now' but still lets the exit
+ # method attempt to remove it (which will
+ # not throw due to our del file being quiet
+ # about files that are not there)
+ def unlink_now():
+ fh.unlink(fh.name)
+
+ setattr(fh, 'unlink_now', unlink_now)
+ return fh
+
+
+def fork_cb(child_cb, *args):
+ fid = os.fork()
+ if fid == 0:
+ try:
+ child_cb(*args)
+ os._exit(0) # pylint: disable=W0212
+ except:
+ logexc(LOG, ("Failed forking and"
+ " calling callback %s"), obj_name(child_cb))
+ os._exit(1) # pylint: disable=W0212
+ else:
+ LOG.debug("Forked child %s who will run callback %s",
+ fid, obj_name(child_cb))
+
+
+def is_true(val, addons=None):
+ if isinstance(val, (bool)):
+ return val is True
+ check_set = ['true', '1', 'on', 'yes']
+ if addons:
+ check_set = check_set + addons
+ if str(val).lower().strip() in check_set:
+ return True
+ return False
+
+
+def is_false(val, addons=None):
+ if isinstance(val, (bool)):
+ return val is False
+ check_set = ['off', '0', 'no', 'false']
+ if addons:
+ check_set = check_set + addons
+ if str(val).lower().strip() in check_set:
+ return True
+ return False
+
+
+def translate_bool(val, addons=None):
+ if not val:
+ # This handles empty lists and false and
+ # other things that python believes are false
+ return False
+ # If its already a boolean skip
+ if isinstance(val, (bool)):
+ return val
+ return is_true(val, addons)
+
+
+def rand_str(strlen=32, select_from=None):
+ if not select_from:
+ select_from = string.letters + string.digits
+ return "".join([random.choice(select_from) for _x in range(0, strlen)])
def read_conf(fname):
try:
- stream = open(fname, "r")
- conf = yaml.safe_load(stream)
- stream.close()
- return conf
+ return load_yaml(load_file(fname), default={})
except IOError as e:
if e.errno == errno.ENOENT:
return {}
- raise
-
+ else:
+ raise
-def get_base_cfg(cfgfile, cfg_builtin="", parsed_cfgs=None):
- kerncfg = {}
- syscfg = {}
- if parsed_cfgs and cfgfile in parsed_cfgs:
- return(parsed_cfgs[cfgfile])
- syscfg = read_conf_with_confd(cfgfile)
+def clean_filename(fn):
+ for (k, v) in FN_REPLACEMENTS.iteritems():
+ fn = fn.replace(k, v)
+ removals = []
+ for k in fn:
+ if k not in FN_ALLOWED:
+ removals.append(k)
+ for k in removals:
+ fn = fn.replace(k, '')
+ fn = fn.strip()
+ return fn
- kern_contents = read_cc_from_cmdline()
- if kern_contents:
- kerncfg = yaml.safe_load(kern_contents)
- # kernel parameters override system config
- combined = mergedict(kerncfg, syscfg)
+def decomp_str(data):
+ try:
+ buf = StringIO(str(data))
+ with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
+ return gh.read()
+ except:
+ return data
+
+
+def find_modules(root_dir):
+ entries = dict()
+ for fname in glob.glob(os.path.join(root_dir, "*.py")):
+ if not os.path.isfile(fname):
+ continue
+ modname = os.path.basename(fname)[0:-3]
+ modname = modname.strip()
+ if modname and modname.find(".") == -1:
+ entries[fname] = modname
+ return entries
+
+
+def multi_log(text, console=True, stderr=True,
+ log=None, log_level=logging.DEBUG):
+ if stderr:
+ sys.stderr.write(text)
+ if console:
+ # Don't use the write_file since
+ # this might be 'sensitive' info (not debug worthy?)
+ with open('/dev/console', 'wb') as wfh:
+ wfh.write(text)
+ wfh.flush()
+ if log:
+ log.log(log_level, text)
+
+
+def is_ipv4(instr):
+ """ determine if input string is a ipv4 address. return boolean"""
+ toks = instr.split('.')
+ if len(toks) != 4:
+ return False
- if cfg_builtin:
- builtin = yaml.safe_load(cfg_builtin)
- fin = mergedict(combined, builtin)
- else:
- fin = combined
+ try:
+ toks = [x for x in toks if (int(x) < 256 and int(x) > 0)]
+ except:
+ return False
- if parsed_cfgs != None:
- parsed_cfgs[cfgfile] = fin
- return(fin)
+ return (len(toks) == 4)
def get_cfg_option_bool(yobj, key, default=False):
if key not in yobj:
return default
- val = yobj[key]
- if val is True:
- return True
- if str(val).lower() in ['true', '1', 'on', 'yes']:
- return True
- return False
+ return translate_bool(yobj[key])
def get_cfg_option_str(yobj, key, default=None):
if key not in yobj:
return default
- return yobj[key]
+ val = yobj[key]
+ if not isinstance(val, (str, basestring)):
+ val = str(val)
+ return val
-def get_cfg_option_list_or_str(yobj, key, default=None):
+def system_info():
+ return {
+ 'platform': platform.platform(),
+ 'release': platform.release(),
+ 'python': platform.python_version(),
+ 'uname': platform.uname(),
+ }
+
+
+def get_cfg_option_list(yobj, key, default=None):
"""
Gets the C{key} config option from C{yobj} as a list of strings. If the
key is present as a single string it will be returned as a list with one
@@ -114,9 +343,14 @@ def get_cfg_option_list_or_str(yobj, key, default=None):
return default
if yobj[key] is None:
return []
- if isinstance(yobj[key], list):
- return yobj[key]
- return [yobj[key]]
+ val = yobj[key]
+ if isinstance(val, (list)):
+ # Should we ensure they are all strings??
+ cval = [str(v) for v in val]
+ return cval
+ if not isinstance(val, (str, basestring)):
+ val = str(val)
+ return [val]
# get a cfg entry by its path array
@@ -125,18 +359,121 @@ def get_cfg_by_path(yobj, keyp, default=None):
cur = yobj
for tok in keyp:
if tok not in cur:
- return(default)
+ return default
cur = cur[tok]
- return(cur)
+ return cur
+
+
+def fixup_output(cfg, mode):
+ (outfmt, errfmt) = get_output_cfg(cfg, mode)
+ redirect_output(outfmt, errfmt)
+ return (outfmt, errfmt)
+
+
+# redirect_output(outfmt, errfmt, orig_out, orig_err)
+# replace orig_out and orig_err with filehandles specified in outfmt or errfmt
+# fmt can be:
+# > FILEPATH
+# >> FILEPATH
+# | program [ arg1 [ arg2 [ ... ] ] ]
+#
+# with a '|', arguments are passed to shell, so one level of
+# shell escape is required.
+def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
+ if not o_out:
+ o_out = sys.stdout
+ if not o_err:
+ o_err = sys.stderr
+
+ if outfmt:
+ LOG.debug("Redirecting %s to %s", o_out, outfmt)
+ (mode, arg) = outfmt.split(" ", 1)
+ if mode == ">" or mode == ">>":
+ owith = "ab"
+ if mode == ">":
+ owith = "wb"
+ new_fp = open(arg, owith)
+ elif mode == "|":
+ proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
+ new_fp = proc.stdin
+ else:
+ raise TypeError("Invalid type for output format: %s" % outfmt)
+
+ if o_out:
+ os.dup2(new_fp.fileno(), o_out.fileno())
+
+ if errfmt == outfmt:
+ LOG.debug("Redirecting %s to %s", o_err, outfmt)
+ os.dup2(new_fp.fileno(), o_err.fileno())
+ return
+
+ if errfmt:
+ LOG.debug("Redirecting %s to %s", o_err, errfmt)
+ (mode, arg) = errfmt.split(" ", 1)
+ if mode == ">" or mode == ">>":
+ owith = "ab"
+ if mode == ">":
+ owith = "wb"
+ new_fp = open(arg, owith)
+ elif mode == "|":
+ proc = subprocess.Popen(arg, shell=True, stdin=subprocess.PIPE)
+ new_fp = proc.stdin
+ else:
+ raise TypeError("Invalid type for error format: %s" % errfmt)
+
+ if o_err:
+ os.dup2(new_fp.fileno(), o_err.fileno())
+
+
+def make_url(scheme, host, port=None,
+ path='', params='', query='', fragment=''):
+
+ pieces = []
+ pieces.append(scheme or '')
+
+ netloc = ''
+ if host:
+ netloc = str(host)
+
+ if port is not None:
+ netloc += ":" + "%s" % (port)
+
+ pieces.append(netloc or '')
+ pieces.append(path or '')
+ pieces.append(params or '')
+ pieces.append(query or '')
+ pieces.append(fragment or '')
+
+ return urlparse.urlunparse(pieces)
+
+
+def obj_name(obj):
+ if isinstance(obj, (types.TypeType,
+ types.ModuleType,
+ types.FunctionType,
+ types.LambdaType)):
+ return str(obj.__name__)
+ return obj_name(obj.__class__)
+
+
+def mergemanydict(srcs, reverse=False):
+ if reverse:
+ srcs = reversed(srcs)
+ m_cfg = {}
+ for a_cfg in srcs:
+ if a_cfg:
+ m_cfg = mergedict(m_cfg, a_cfg)
+ return m_cfg
def mergedict(src, cand):
"""
- Merge values from C{cand} into C{src}. If C{src} has a key C{cand} will
- not override. Nested dictionaries are merged recursively.
+ Merge values from C{cand} into C{src}.
+ If C{src} has a key C{cand} will not override.
+ Nested dictionaries are merged recursively.
"""
if isinstance(src, dict) and isinstance(cand, dict):
- for k, v in cand.iteritems():
+ for (k, v) in cand.iteritems():
if k not in src:
src[k] = v
else:
@@ -144,104 +481,66 @@ def mergedict(src, cand):
return src
-def delete_dir_contents(dirname):
- """
- Deletes all contents of a directory without deleting the directory itself.
+@contextlib.contextmanager
+def chdir(ndir):
+ curr = os.getcwd()
+ try:
+ os.chdir(ndir)
+ yield ndir
+ finally:
+ os.chdir(curr)
- @param dirname: The directory whose contents should be deleted.
- """
- for node in os.listdir(dirname):
- node_fullpath = os.path.join(dirname, node)
- if os.path.isdir(node_fullpath):
- shutil.rmtree(node_fullpath)
- else:
- os.unlink(node_fullpath)
+@contextlib.contextmanager
+def umask(n_msk):
+ old = os.umask(n_msk)
+ try:
+ yield old
+ finally:
+ os.umask(old)
-def write_file(filename, content, mode=0644, omode="wb"):
- """
- Writes a file with the given content and sets the file mode as specified.
- Resotres the SELinux context if possible.
- @param filename: The full path of the file to write.
- @param content: The content to write to the file.
- @param mode: The filesystem mode to set on the file.
- @param omode: The open mode used when opening the file (r, rb, a, etc.)
- """
+@contextlib.contextmanager
+def tempdir(**kwargs):
+ # This seems like it was only added in python 3.2
+ # Make it since it's useful...
+ # See: http://bugs.python.org/file12970/tempdir.patch
+ tdir = tempfile.mkdtemp(**kwargs)
try:
- os.makedirs(os.path.dirname(filename))
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise e
+ yield tdir
+ finally:
+ del_dir(tdir)
- f = open(filename, omode)
- if mode is not None:
- os.chmod(filename, mode)
- f.write(content)
- f.close()
- restorecon_if_possible(filename)
-
-
-def restorecon_if_possible(path, recursive=False):
- if HAVE_LIBSELINUX and selinux.is_selinux_enabled():
- selinux.restorecon(path, recursive=recursive)
-
-
-# get keyid from keyserver
-def getkeybyid(keyid, keyserver):
- shcmd = """
- k=${1} ks=${2};
- exec 2>/dev/null
- [ -n "$k" ] || exit 1;
- armour=$(gpg --list-keys --armour "${k}")
- if [ -z "${armour}" ]; then
- gpg --keyserver ${ks} --recv $k >/dev/null &&
- armour=$(gpg --export --armour "${k}") &&
- gpg --batch --yes --delete-keys "${k}"
- fi
- [ -n "${armour}" ] && echo "${armour}"
- """
- args = ['sh', '-c', shcmd, "export-gpg-keyid", keyid, keyserver]
- return(subp(args)[0])
+
+def center(text, fill, max_len):
+ return '{0:{fill}{align}{size}}'.format(text, fill=fill,
+ align="^", size=max_len)
+
+
+def del_dir(path):
+ LOG.debug("Recursively deleting %s", path)
+ shutil.rmtree(path)
def runparts(dirp, skip_no_exist=True):
if skip_no_exist and not os.path.isdir(dirp):
return
- failed = 0
+ failed = []
+ attempted = []
for exe_name in sorted(os.listdir(dirp)):
exe_path = os.path.join(dirp, exe_name)
if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK):
- popen = subprocess.Popen([exe_path])
- popen.communicate()
- if popen.returncode is not 0:
- failed += 1
- sys.stderr.write("failed: %s [%i]\n" %
- (exe_path, popen.returncode))
- if failed:
- raise RuntimeError('runparts: %i failures' % failed)
-
-
-def subp(args, input_=None):
- sp = subprocess.Popen(args, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE, stdin=subprocess.PIPE)
- out, err = sp.communicate(input_)
- if sp.returncode is not 0:
- raise subprocess.CalledProcessError(sp.returncode, args, (out, err))
- return(out, err)
-
-
-def render_to_file(template, outfile, searchList):
- t = Template(file='/etc/cloud/templates/%s.tmpl' % template,
- searchList=[searchList])
- f = open(outfile, 'w')
- f.write(t.respond())
- f.close()
-
+ attempted.append(exe_path)
+ try:
+ subp([exe_path])
+ except ProcessExecutionError as e:
+ logexc(LOG, "Failed running %s [%s]", exe_path, e.exit_code)
+ failed.append(e)
-def render_string(template, searchList):
- return(Template(template, searchList=[searchList]).respond())
+ if failed and attempted:
+ raise RuntimeError('Runparts: %s failures in %s attempted commands'
+ % (len(failed), len(attempted)))
# read_optional_seed
@@ -254,13 +553,39 @@ def read_optional_seed(fill, base="", ext="", timeout=5):
fill['user-data'] = ud
fill['meta-data'] = md
return True
- except OSError, e:
+ except OSError as e:
if e.errno == errno.ENOENT:
return False
raise
-# raise OSError with enoent if not found
+def read_file_or_url(url, timeout=5, retries=10, file_retries=0):
+ if url.startswith("/"):
+ url = "file://%s" % url
+ if url.startswith("file://"):
+ retries = file_retries
+ return uhelp.readurl(url, timeout=timeout, retries=retries)
+
+
+def load_yaml(blob, default=None, allowed=(dict,)):
+ loaded = default
+ try:
+ blob = str(blob)
+ LOG.debug(("Attempting to load yaml from string "
+ "of length %s with allowed root types %s"),
+ len(blob), allowed)
+ converted = yaml.safe_load(blob)
+ if not isinstance(converted, allowed):
+ # Yes this will just be caught, but that's ok for now...
+ raise TypeError(("Yaml load allows %s root types,"
+ " but got %s instead") %
+ (allowed, obj_name(converted)))
+ loaded = converted
+ except (yaml.YAMLError, TypeError, ValueError):
+ logexc(LOG, "Failed loading yaml blob")
+ return loaded
+
+
def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
if base.startswith("/"):
base = "file://%s" % base
@@ -276,139 +601,62 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
ud_url = "%s%s%s" % (base, "user-data", ext)
md_url = "%s%s%s" % (base, "meta-data", ext)
- no_exc = object()
- raise_err = no_exc
- for attempt in range(0, retries + 1):
- try:
- md_str = readurl(md_url, timeout=timeout)
- ud = readurl(ud_url, timeout=timeout)
- md = yaml.safe_load(md_str)
-
- return(md, ud)
- except urllib2.HTTPError as e:
- raise_err = e
- except urllib2.URLError as e:
- raise_err = e
- if (isinstance(e.reason, OSError) and
- e.reason.errno == errno.ENOENT):
- raise_err = e.reason
-
- if attempt == retries:
- break
-
- #print "%s failed, sleeping" % attempt
- time.sleep(1)
-
- raise(raise_err)
-
+ md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
+ md = None
+ if md_resp.ok():
+ md_str = str(md_resp)
+ md = load_yaml(md_str, default={})
-def logexc(log, lvl=logging.DEBUG):
- log.log(lvl, traceback.format_exc())
+ ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
+ ud = None
+ if ud_resp.ok():
+ ud_str = str(ud_resp)
+ ud = ud_str
-
-class RecursiveInclude(Exception):
- pass
-
-
-def read_file_with_includes(fname, rel=".", stack=None, patt=None):
- if stack is None:
- stack = []
- if not fname.startswith("/"):
- fname = os.sep.join((rel, fname))
-
- fname = os.path.realpath(fname)
-
- if fname in stack:
- raise(RecursiveInclude("%s recursively included" % fname))
- if len(stack) > 10:
- raise(RecursiveInclude("%s included, stack size = %i" %
- (fname, len(stack))))
-
- if patt == None:
- patt = re.compile("^#(opt_include|include)[ \t].*$", re.MULTILINE)
-
- try:
- fp = open(fname)
- contents = fp.read()
- fp.close()
- except:
- raise
-
- rel = os.path.dirname(fname)
- stack.append(fname)
-
- cur = 0
- while True:
- match = patt.search(contents[cur:])
- if not match:
- break
- loc = match.start() + cur
- endl = match.end() + cur
-
- (key, cur_fname) = contents[loc:endl].split(None, 2)
- cur_fname = cur_fname.strip()
-
- try:
- inc_contents = read_file_with_includes(cur_fname, rel, stack, patt)
- except IOError, e:
- if e.errno == errno.ENOENT and key == "#opt_include":
- inc_contents = ""
- else:
- raise
- contents = contents[0:loc] + inc_contents + contents[endl + 1:]
- cur = loc + len(inc_contents)
- stack.pop()
- return(contents)
+ return (md, ud)
def read_conf_d(confd):
- # get reverse sorted list (later trumps newer)
+ # Get reverse sorted list (later trumps newer)
confs = sorted(os.listdir(confd), reverse=True)
- # remove anything not ending in '.cfg'
+ # Remove anything not ending in '.cfg'
confs = [f for f in confs if f.endswith(".cfg")]
- # remove anything not a file
- confs = [f for f in confs if os.path.isfile("%s/%s" % (confd, f))]
+ # Remove anything not a file
+ confs = [f for f in confs
+ if os.path.isfile(os.path.join(confd, f))]
- cfg = {}
- for conf in confs:
- cfg = mergedict(cfg, read_conf("%s/%s" % (confd, conf)))
+ # Load them all so that they can be merged
+ cfgs = []
+ for fn in confs:
+ cfgs.append(read_conf(os.path.join(confd, fn)))
- return(cfg)
+ return mergemanydict(cfgs)
def read_conf_with_confd(cfgfile):
cfg = read_conf(cfgfile)
+
confd = False
if "conf_d" in cfg:
- if cfg['conf_d'] is not None:
- confd = cfg['conf_d']
- if not isinstance(confd, str):
- raise Exception("cfgfile %s contains 'conf_d' "
- "with non-string" % cfgfile)
+ confd = cfg['conf_d']
+ if confd:
+ if not isinstance(confd, (str, basestring)):
+ raise TypeError(("Config file %s contains 'conf_d' "
+ "with non-string type %s") %
+ (cfgfile, obj_name(confd)))
+ else:
+ confd = str(confd).strip()
elif os.path.isdir("%s.d" % cfgfile):
confd = "%s.d" % cfgfile
- if not confd:
- return(cfg)
+ if not confd or not os.path.isdir(confd):
+ return cfg
+ # Conf.d settings override input configuration
confd_cfg = read_conf_d(confd)
-
- return(mergedict(confd_cfg, cfg))
-
-
-def get_cmdline():
- if 'DEBUG_PROC_CMDLINE' in os.environ:
- cmdline = os.environ["DEBUG_PROC_CMDLINE"]
- else:
- try:
- cmdfp = open("/proc/cmdline")
- cmdline = cmdfp.read().strip()
- cmdfp.close()
- except:
- cmdline = ""
- return(cmdline)
+ return mergedict(confd_cfg, cfg)
def read_cc_from_cmdline(cmdline=None):
@@ -439,147 +687,15 @@ def read_cc_from_cmdline(cmdline=None):
begin = cmdline.find(tag_begin, end + end_l)
- return('\n'.join(tokens))
-
-
-def ensure_dirs(dirlist, mode=0755):
- fixmodes = []
- for d in dirlist:
- try:
- if mode != None:
- os.makedirs(d)
- else:
- os.makedirs(d, mode)
- except OSError as e:
- if e.errno != errno.EEXIST:
- raise
- if mode != None:
- fixmodes.append(d)
-
- for d in fixmodes:
- os.chmod(d, mode)
-
-
-def chownbyname(fname, user=None, group=None):
- uid = -1
- gid = -1
- if user == None and group == None:
- return
- if user:
- import pwd
- uid = pwd.getpwnam(user).pw_uid
- if group:
- import grp
- gid = grp.getgrnam(group).gr_gid
-
- os.chown(fname, uid, gid)
-
+ return '\n'.join(tokens)
-def readurl(url, data=None, timeout=None):
- openargs = {}
- if timeout != None:
- openargs['timeout'] = timeout
- if data is None:
- req = urllib2.Request(url)
- else:
- encoded = urllib.urlencode(data)
- req = urllib2.Request(url, encoded)
-
- response = urllib2.urlopen(req, **openargs)
- return(response.read())
-
-
-# shellify, takes a list of commands
-# for each entry in the list
-# if it is an array, shell protect it (with single ticks)
-# if it is a string, do nothing
-def shellify(cmdlist):
- content = "#!/bin/sh\n"
- escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
- for args in cmdlist:
- # if the item is a list, wrap all items in single tick
- # if its not, then just write it directly
- if isinstance(args, list):
- fixed = []
- for f in args:
- fixed.append("'%s'" % str(f).replace("'", escaped))
- content = "%s%s\n" % (content, ' '.join(fixed))
- else:
- content = "%s%s\n" % (content, str(args))
- return content
-
-
-def dos2unix(string):
+def dos2unix(contents):
# find first end of line
- pos = string.find('\n')
- if pos <= 0 or string[pos - 1] != '\r':
- return(string)
- return(string.replace('\r\n', '\n'))
-
-
-def is_container():
- # is this code running in a container of some sort
-
- for helper in ('running-in-container', 'lxc-is-container'):
- try:
- # try to run a helper program. if it returns true
- # then we're inside a container. otherwise, no
- sp = subprocess.Popen(helper, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- sp.communicate(None)
- return(sp.returncode == 0)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
-
- # this code is largely from the logic in
- # ubuntu's /etc/init/container-detect.conf
- try:
- # Detect old-style libvirt
- # Detect OpenVZ containers
- pid1env = get_proc_env(1)
- if "container" in pid1env:
- return True
-
- if "LIBVIRT_LXC_UUID" in pid1env:
- return True
-
- except IOError as e:
- if e.errno != errno.ENOENT:
- pass
-
- # Detect OpenVZ containers
- if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"):
- return True
-
- try:
- # Detect Vserver containers
- with open("/proc/self/status") as fp:
- lines = fp.read().splitlines()
- for line in lines:
- if line.startswith("VxID:"):
- (_key, val) = line.strip().split(":", 1)
- if val != "0":
- return True
- except IOError as e:
- if e.errno != errno.ENOENT:
- pass
-
- return False
-
-
-def get_proc_env(pid):
- # return the environment in a dict that a given process id was started with
- env = {}
- with open("/proc/%s/environ" % pid) as fp:
- toks = fp.read().split("\0")
- for tok in toks:
- if tok == "":
- continue
- (name, val) = tok.split("=", 1)
- env[name] = val
- return env
+ pos = contents.find('\n')
+ if pos <= 0 or contents[pos - 1] != '\r':
+ return contents
+ return contents.replace('\r\n', '\n')
def get_hostname_fqdn(cfg, cloud):
@@ -603,38 +719,72 @@ def get_hostname_fqdn(cfg, cloud):
hostname = cfg['hostname']
else:
hostname = cloud.get_hostname()
- return(hostname, fqdn)
+ return (hostname, fqdn)
def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
- # this parses /etc/hosts to get a fqdn. It should return the same
- # result as 'hostname -f <hostname>' if /etc/hosts.conf
- # did not have did not have 'bind' in the order attribute
+ """
+ For each host a single line should be present with
+ the following information:
+
+ IP_address canonical_hostname [aliases...]
+
+ Fields of the entry are separated by any number of blanks and/or tab
+ characters. Text from a "#" character until the end of the line is a
+ comment, and is ignored. Host names may contain only alphanumeric
+ characters, minus signs ("-"), and periods ("."). They must begin with
+ an alphabetic character and end with an alphanumeric character.
+ Optional aliases provide for name changes, alternate spellings, shorter
+ hostnames, or generic hostnames (for example, localhost).
+ """
fqdn = None
try:
- with open(filename, "r") as hfp:
- for line in hfp.readlines():
- hashpos = line.find("#")
- if hashpos >= 0:
- line = line[0:hashpos]
- toks = line.split()
-
- # if there there is less than 3 entries (ip, canonical, alias)
- # then ignore this line
- if len(toks) < 3:
- continue
-
- if hostname in toks[2:]:
- fqdn = toks[1]
- break
- hfp.close()
- except IOError as e:
- if e.errno == errno.ENOENT:
- pass
+ for line in load_file(filename).splitlines():
+ hashpos = line.find("#")
+ if hashpos >= 0:
+ line = line[0:hashpos]
+ line = line.strip()
+ if not line:
+ continue
+ # If there there is less than 3 entries
+ # (IP_address, canonical_hostname, alias)
+ # then ignore this line
+ toks = line.split()
+ if len(toks) < 3:
+ continue
+
+ if hostname in toks[2:]:
+ fqdn = toks[1]
+ break
+ except IOError:
+ pass
return fqdn
+def get_cmdline_url(names=('cloud-config-url', 'url'),
+ starts="#cloud-config", cmdline=None):
+ if cmdline is None:
+ cmdline = get_cmdline()
+
+ data = keyval_str_to_dict(cmdline)
+ url = None
+ key = None
+ for key in names:
+ if key in data:
+ url = data[key]
+ break
+
+ if not url:
+ return (None, None, None)
+
+ resp = uhelp.readurl(url)
+ if resp.contents.startswith(starts) and resp.ok():
+ return (key, url, str(resp))
+
+ return (key, url, None)
+
+
def is_resolvable(name):
""" determine if a url is resolvable, return a boolean """
try:
@@ -644,9 +794,14 @@ def is_resolvable(name):
return False
+def get_hostname():
+ hostname = socket.gethostname()
+ return hostname
+
+
def is_resolvable_url(url):
""" determine if this url is resolvable (existing or ip) """
- return(is_resolvable(urlparse.urlparse(url).hostname))
+ return (is_resolvable(urlparse.urlparse(url).hostname))
def search_for_mirror(candidates):
@@ -656,8 +811,7 @@ def search_for_mirror(candidates):
if is_resolvable_url(cand):
return cand
except Exception:
- raise
-
+ pass
return None
@@ -669,13 +823,14 @@ def close_stdin():
if _CLOUD_INIT_SAVE_STDIN is set in environment to a non empty or '0' value
then input will not be closed (only useful potentially for debugging).
"""
- if os.environ.get("_CLOUD_INIT_SAVE_STDIN") in ("", "0", False):
+ if os.environ.get("_CLOUD_INIT_SAVE_STDIN") in ("", "0", 'False'):
return
with open(os.devnull) as fp:
os.dup2(fp.fileno(), sys.stdin.fileno())
-def find_devs_with(criteria):
+def find_devs_with(criteria=None, oformat='device',
+ tag=None, no_cache=False, path=None):
"""
find devices matching given criteria (via blkid)
criteria can be *one* of:
@@ -683,165 +838,555 @@ def find_devs_with(criteria):
LABEL=<label>
UUID=<uuid>
"""
+ blk_id_cmd = ['blkid']
+ options = []
+ if criteria:
+ # Search for block devices with tokens named NAME that
+ # have the value 'value' and display any devices which are found.
+ # Common values for NAME include TYPE, LABEL, and UUID.
+ # If there are no devices specified on the command line,
+ # all block devices will be searched; otherwise,
+ # only search the devices specified by the user.
+ options.append("-t%s" % (criteria))
+ if tag:
+ # For each (specified) device, show only the tags that match tag.
+ options.append("-s%s" % (tag))
+ if no_cache:
+ # If you want to start with a clean cache
+ # (i.e. don't report devices previously scanned
+ # but not necessarily available at this time), specify /dev/null.
+ options.extend(["-c", "/dev/null"])
+ if oformat:
+ # Display blkid's output using the specified format.
+ # The format parameter may be:
+ # full, value, list, device, udev, export
+ options.append('-o%s' % (oformat))
+ if path:
+ options.append(path)
+ cmd = blk_id_cmd + options
+ # See man blkid for why 2 is added
+ (out, _err) = subp(cmd, rcs=[0, 2])
+ entries = []
+ for line in out.splitlines():
+ line = line.strip()
+ if line:
+ entries.append(line)
+ return entries
+
+
+def load_file(fname, read_cb=None, quiet=False):
+ LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
+ ofh = StringIO()
try:
- (out, _err) = subp(['blkid', '-t%s' % criteria, '-odevice'])
- except subprocess.CalledProcessError:
- return([])
- return(str(out).splitlines())
+ with open(fname, 'rb') as ifh:
+ pipe_in_out(ifh, ofh, chunk_cb=read_cb)
+ except IOError as e:
+ if not quiet:
+ raise
+ if e.errno != errno.ENOENT:
+ raise
+ contents = ofh.getvalue()
+ LOG.debug("Read %s bytes from %s", len(contents), fname)
+ return contents
-class mountFailedError(Exception):
- pass
+def get_cmdline():
+ if 'DEBUG_PROC_CMDLINE' in os.environ:
+ cmdline = os.environ["DEBUG_PROC_CMDLINE"]
+ else:
+ try:
+ cmdline = load_file("/proc/cmdline").strip()
+ except:
+ cmdline = ""
+ return cmdline
-def mount_callback_umount(device, callback, data=None):
+def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
+ bytes_piped = 0
+ while True:
+ data = in_fh.read(chunk_size)
+ if data == '':
+ break
+ else:
+ out_fh.write(data)
+ bytes_piped += len(data)
+ if chunk_cb:
+ chunk_cb(bytes_piped)
+ out_fh.flush()
+ return bytes_piped
+
+
+def chownbyid(fname, uid=None, gid=None):
+ if uid in [None, -1] and gid in [None, -1]:
+ # Nothing to do
+ return
+ LOG.debug("Changing the ownership of %s to %s:%s", fname, uid, gid)
+ os.chown(fname, uid, gid)
+
+
+def chownbyname(fname, user=None, group=None):
+ uid = -1
+ gid = -1
+ try:
+ if user:
+ uid = pwd.getpwnam(user).pw_uid
+ if group:
+ gid = grp.getgrnam(group).gr_gid
+ except KeyError:
+ logexc(LOG, ("Failed changing the ownership of %s using username %s and"
+ " groupname %s (do they exist?)"), fname, user, group)
+ return False
+ chownbyid(fname, uid, gid)
+ return True
+
+
+# Always returns well formatted values
+# cfg is expected to have an entry 'output' in it, which is a dictionary
+# that includes entries for 'init', 'config', 'final' or 'all'
+# init: /var/log/cloud.out
+# config: [ ">> /var/log/cloud-config.out", /var/log/cloud-config.err ]
+# final:
+# output: "| logger -p"
+# error: "> /dev/null"
+# this returns the specific 'mode' entry, cleanly formatted, with value
+def get_output_cfg(cfg, mode):
+ ret = [None, None]
+ if not cfg or not 'output' in cfg:
+ return ret
+
+ outcfg = cfg['output']
+ if mode in outcfg:
+ modecfg = outcfg[mode]
+ else:
+ if 'all' not in outcfg:
+ return ret
+ # if there is a 'all' item in the output list
+ # then it applies to all users of this (init, config, final)
+ modecfg = outcfg['all']
+
+ # if value is a string, it specifies stdout and stderr
+ if isinstance(modecfg, str):
+ ret = [modecfg, modecfg]
+
+ # if its a list, then we expect (stdout, stderr)
+ if isinstance(modecfg, list):
+ if len(modecfg) > 0:
+ ret[0] = modecfg[0]
+ if len(modecfg) > 1:
+ ret[1] = modecfg[1]
+
+ # if it is a dictionary, expect 'out' and 'error'
+ # items, which indicate out and error
+ if isinstance(modecfg, dict):
+ if 'output' in modecfg:
+ ret[0] = modecfg['output']
+ if 'error' in modecfg:
+ ret[1] = modecfg['error']
+
+ # if err's entry == "&1", then make it same as stdout
+ # as in shell syntax of "echo foo >/dev/null 2>&1"
+ if ret[1] == "&1":
+ ret[1] = ret[0]
+
+ swlist = [">>", ">", "|"]
+ for i in range(len(ret)):
+ if not ret[i]:
+ continue
+ val = ret[i].lstrip()
+ found = False
+ for s in swlist:
+ if val.startswith(s):
+ val = "%s %s" % (s, val[len(s):].strip())
+ found = True
+ break
+ if not found:
+ # default behavior is append
+ val = "%s %s" % (">>", val.strip())
+ ret[i] = val
+
+ return ret
+
+
+def logexc(log, msg, *args):
+ # Setting this here allows this to change
+ # levels easily (not always error level)
+ # or even desirable to have that much junk
+ # coming out to a non-debug stream
+ if msg:
+ log.warn(msg, *args)
+ # Debug gets the full trace
+ log.debug(msg, exc_info=1, *args)
+
+
+def hash_blob(blob, routine, mlen=None):
+ hasher = hashlib.new(routine)
+ hasher.update(blob)
+ digest = hasher.hexdigest()
+    # Don't get too long now
+ if mlen is not None:
+ return digest[0:mlen]
+ else:
+ return digest
+
+
+def rename(src, dest):
+ LOG.debug("Renaming %s to %s", src, dest)
+    # TODO: use an SELinux guard here?
+ os.rename(src, dest)
+
+
+def ensure_dirs(dirlist, mode=0755):
+ for d in dirlist:
+ ensure_dir(d, mode)
+
+
+def read_write_cmdline_url(target_fn):
+ if not os.path.exists(target_fn):
+ try:
+ (key, url, content) = get_cmdline_url()
+ except:
+ logexc(LOG, "Failed fetching command line url")
+ return
+ try:
+ if key and content:
+ write_file(target_fn, content, mode=0600)
+ LOG.debug(("Wrote to %s with contents of command line"
+ " url %s (len=%s)"), target_fn, url, len(content))
+ elif key and not content:
+ LOG.debug(("Command line key %s with url"
+ " %s had no contents"), key, url)
+ except:
+ logexc(LOG, "Failed writing url content to %s", target_fn)
+
+
+def yaml_dumps(obj):
+ formatted = yaml.dump(obj,
+ line_break="\n",
+ indent=4,
+ explicit_start=True,
+ explicit_end=True,
+ default_flow_style=False,
+ )
+ return formatted
+
+
+def ensure_dir(path, mode=None):
+ if not os.path.isdir(path):
+ # Make the dir and adjust the mode
+ with SeLinuxGuard(os.path.dirname(path), recursive=True):
+ os.makedirs(path)
+ chmod(path, mode)
+ else:
+ # Just adjust the mode
+ chmod(path, mode)
+
+
+@contextlib.contextmanager
+def unmounter(umount):
+ try:
+ yield umount
+ finally:
+ if umount:
+ umount_cmd = ["umount", '-l', umount]
+ subp(umount_cmd)
+
+
+def mounts():
+ mounted = {}
+ try:
+ # Go through mounts to see what is already mounted
+ mount_locs = load_file("/proc/mounts").splitlines()
+ for mpline in mount_locs:
+ # Format at: man fstab
+ try:
+ (dev, mp, fstype, opts, _freq, _passno) = mpline.split()
+ except:
+ continue
+ # If the name of the mount point contains spaces these
+ # can be escaped as '\040', so undo that..
+ mp = mp.replace("\\040", " ")
+ mounted[dev] = {
+ 'fstype': fstype,
+ 'mountpoint': mp,
+ 'opts': opts,
+ }
+ LOG.debug("Fetched %s mounts from %s", mounted, "/proc/mounts")
+ except (IOError, OSError):
+ logexc(LOG, "Failed fetching mount points from /proc/mounts")
+ return mounted
+
+
+def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
"""
- mount the device, call method 'callback' passing the directory
+ Mount the device, call method 'callback' passing the directory
in which it was mounted, then unmount. Return whatever 'callback'
returned. If data != None, also pass data to callback.
"""
-
- def _cleanup(umount, tmpd):
- if umount:
+ mounted = mounts()
+ with tempdir() as tmpd:
+ umount = False
+ if device in mounted:
+ mountpoint = mounted[device]['mountpoint']
+ else:
try:
- subp(["umount", '-l', umount])
- except subprocess.CalledProcessError:
- raise
- if tmpd:
- os.rmdir(tmpd)
+ mountcmd = ['mount']
+ mountopts = []
+ if rw:
+ mountopts.append('rw')
+ else:
+ mountopts.append('ro')
+ if sync:
+ # This seems like the safe approach to do
+ # (ie where this is on by default)
+ mountopts.append("sync")
+ if mountopts:
+ mountcmd.extend(["-o", ",".join(mountopts)])
+ if mtype:
+ mountcmd.extend(['-t', mtype])
+ mountcmd.append(device)
+ mountcmd.append(tmpd)
+ subp(mountcmd)
+ umount = tmpd # This forces it to be unmounted (when set)
+ mountpoint = tmpd
+ except (IOError, OSError) as exc:
+ raise MountFailedError(("Failed mounting %s "
+ "to %s due to: %s") %
+ (device, tmpd, exc))
+ # Be nice and ensure it ends with a slash
+ if not mountpoint.endswith("/"):
+ mountpoint += "/"
+ with unmounter(umount):
+ if data is None:
+ ret = callback(mountpoint)
+ else:
+ ret = callback(mountpoint, data)
+ return ret
- # go through mounts to see if it was already mounted
- fp = open("/proc/mounts")
- mounts = fp.readlines()
- fp.close()
- tmpd = None
+def get_builtin_cfg():
+ # Deep copy so that others can't modify
+ return obj_copy.deepcopy(CFG_BUILTIN)
- mounted = {}
- for mpline in mounts:
- (dev, mp, fstype, _opts, _freq, _passno) = mpline.split()
- mp = mp.replace("\\040", " ")
- mounted[dev] = (dev, fstype, mp, False)
-
- umount = False
- if device in mounted:
- mountpoint = "%s/" % mounted[device][2]
- else:
- tmpd = tempfile.mkdtemp()
- mountcmd = ["mount", "-o", "ro", device, tmpd]
+def sym_link(source, link):
+ LOG.debug("Creating symbolic link from %r => %r" % (link, source))
+ os.symlink(source, link)
- try:
- (_out, _err) = subp(mountcmd)
- umount = tmpd
- except subprocess.CalledProcessError as exc:
- _cleanup(umount, tmpd)
- raise mountFailedError(exc.output[1])
- mountpoint = "%s/" % tmpd
+def del_file(path):
+ LOG.debug("Attempting to remove %s", path)
+ try:
+ os.unlink(path)
+ except OSError as e:
+ if e.errno != errno.ENOENT:
+ raise e
+
+def copy(src, dest):
+ LOG.debug("Copying %s to %s", src, dest)
+ shutil.copy(src, dest)
+
+
+def time_rfc2822():
try:
- if data == None:
- ret = callback(mountpoint)
- else:
- ret = callback(mountpoint, data)
+ ts = time.strftime("%a, %d %b %Y %H:%M:%S %z", time.gmtime())
+ except:
+ ts = "??"
+ return ts
- except Exception as exc:
- _cleanup(umount, tmpd)
- raise exc
- _cleanup(umount, tmpd)
+def uptime():
+ uptime_str = '??'
+ try:
+ contents = load_file("/proc/uptime").strip()
+ if contents:
+ uptime_str = contents.split()[0]
+ except:
+ logexc(LOG, "Unable to read uptime from /proc/uptime")
+ return uptime_str
+
- return(ret)
+def ensure_file(path, mode=0644):
+ write_file(path, content='', omode="ab", mode=mode)
-def wait_for_url(urls, max_wait=None, timeout=None,
- status_cb=None, headers_cb=None):
+def chmod(path, mode):
+ real_mode = None
+ try:
+ real_mode = int(mode)
+ except (ValueError, TypeError):
+ pass
+ if path and real_mode:
+ with SeLinuxGuard(path):
+ os.chmod(path, real_mode)
+
+
+def write_file(filename, content, mode=0644, omode="wb"):
"""
- urls: a list of urls to try
- max_wait: roughly the maximum time to wait before giving up
- The max time is *actually* len(urls)*timeout as each url will
- be tried once and given the timeout provided.
- timeout: the timeout provided to urllib2.urlopen
- status_cb: call method with string message when a url is not available
- headers_cb: call method with single argument of url to get headers
- for request.
-
- the idea of this routine is to wait for the EC2 metdata service to
- come up. On both Eucalyptus and EC2 we have seen the case where
- the instance hit the MD before the MD service was up. EC2 seems
- to have permenantely fixed this, though.
-
- In openstack, the metadata service might be painfully slow, and
- unable to avoid hitting a timeout of even up to 10 seconds or more
- (LP: #894279) for a simple GET.
-
- Offset those needs with the need to not hang forever (and block boot)
- on a system where cloud-init is configured to look for EC2 Metadata
- service but is not going to find one. It is possible that the instance
- data host (169.254.169.254) may be firewalled off Entirely for a sytem,
- meaning that the connection will block forever unless a timeout is set.
+ Writes a file with the given content and sets the file mode as specified.
+    Restores the SELinux context if possible.
+
+ @param filename: The full path of the file to write.
+ @param content: The content to write to the file.
+ @param mode: The filesystem mode to set on the file.
+ @param omode: The open mode used when opening the file (r, rb, a, etc.)
"""
- starttime = time.time()
+ ensure_dir(os.path.dirname(filename))
+ LOG.debug("Writing to %s - %s: [%s] %s bytes",
+ filename, omode, mode, len(content))
+ with SeLinuxGuard(path=filename):
+ with open(filename, omode) as fh:
+ fh.write(content)
+ fh.flush()
+ chmod(filename, mode)
- sleeptime = 1
- def nullstatus_cb(msg):
- return
+def delete_dir_contents(dirname):
+ """
+ Deletes all contents of a directory without deleting the directory itself.
+
+ @param dirname: The directory whose contents should be deleted.
+ """
+ for node in os.listdir(dirname):
+ node_fullpath = os.path.join(dirname, node)
+ if os.path.isdir(node_fullpath):
+ del_dir(node_fullpath)
+ else:
+ del_file(node_fullpath)
- if status_cb == None:
- status_cb = nullstatus_cb
- def timeup(max_wait, starttime):
- return((max_wait <= 0 or max_wait == None) or
- (time.time() - starttime > max_wait))
+def subp(args, data=None, rcs=None, env=None, capture=True, shell=False):
+ if rcs is None:
+ rcs = [0]
+ try:
+ LOG.debug(("Running command %s with allowed return codes %s"
+ " (shell=%s, capture=%s)"), args, rcs, shell, capture)
+ if not capture:
+ stdout = None
+ stderr = None
+ else:
+ stdout = subprocess.PIPE
+ stderr = subprocess.PIPE
+ stdin = subprocess.PIPE
+ sp = subprocess.Popen(args, stdout=stdout,
+ stderr=stderr, stdin=stdin,
+ env=env, shell=shell)
+ (out, err) = sp.communicate(data)
+ except OSError as e:
+ raise ProcessExecutionError(cmd=args, reason=e)
+ rc = sp.returncode
+ if rc not in rcs:
+ raise ProcessExecutionError(stdout=out, stderr=err,
+ exit_code=rc,
+ cmd=args)
+ # Just ensure blank instead of none?? (iff capturing)
+ if not out and capture:
+ out = ''
+ if not err and capture:
+ err = ''
+ return (out, err)
- loop_n = 0
- while True:
- sleeptime = int(loop_n / 5) + 1
- for url in urls:
- now = time.time()
- if loop_n != 0:
- if timeup(max_wait, starttime):
- break
- if timeout and (now + timeout > (starttime + max_wait)):
- # shorten timeout to not run way over max_time
- timeout = int((starttime + max_wait) - now)
-
- reason = ""
- try:
- if headers_cb != None:
- headers = headers_cb(url)
- else:
- headers = {}
-
- req = urllib2.Request(url, data=None, headers=headers)
- resp = urllib2.urlopen(req, timeout=timeout)
- if resp.read() != "":
- return url
- reason = "empty data [%s]" % resp.getcode()
- except urllib2.HTTPError as e:
- reason = "http error [%s]" % e.code
- except urllib2.URLError as e:
- reason = "url error [%s]" % e.reason
- except socket.timeout as e:
- reason = "socket timeout [%s]" % e
- except Exception as e:
- reason = "unexpected error [%s]" % e
-
- status_cb("'%s' failed [%s/%ss]: %s" %
- (url, int(time.time() - starttime), max_wait,
- reason))
-
- if timeup(max_wait, starttime):
- break
- loop_n = loop_n + 1
- time.sleep(sleeptime)
+def abs_join(*paths):
+ return os.path.abspath(os.path.join(*paths))
+
+
+# shellify, takes a list of commands
+# for each entry in the list
+# if it is an array, shell protect it (with single ticks)
+# if it is a string, do nothing
+def shellify(cmdlist, add_header=True):
+ content = ''
+ if add_header:
+ content += "#!/bin/sh\n"
+ escaped = "%s%s%s%s" % ("'", '\\', "'", "'")
+ cmds_made = 0
+ for args in cmdlist:
+ # If the item is a list, wrap all items in single tick.
+ # If its not, then just write it directly.
+ if isinstance(args, list):
+ fixed = []
+ for f in args:
+ fixed.append("'%s'" % (str(f).replace("'", escaped)))
+ content = "%s%s\n" % (content, ' '.join(fixed))
+ cmds_made += 1
+ elif isinstance(args, (str, basestring)):
+ content = "%s%s\n" % (content, args)
+ cmds_made += 1
+ else:
+ raise RuntimeError(("Unable to shellify type %s"
+ " which is not a list or string")
+ % (obj_name(args)))
+ LOG.debug("Shellified %s commands.", cmds_made)
+ return content
+
+
+def is_container():
+ """
+ Checks to see if this code running in a container of some sort
+ """
+
+ for helper in CONTAINER_TESTS:
+ try:
+ # try to run a helper program. if it returns true/zero
+ # then we're inside a container. otherwise, no
+ subp([helper])
+ return True
+ except (IOError, OSError):
+ pass
+
+ # this code is largely from the logic in
+ # ubuntu's /etc/init/container-detect.conf
+ try:
+ # Detect old-style libvirt
+ # Detect OpenVZ containers
+ pid1env = get_proc_env(1)
+ if "container" in pid1env:
+ return True
+ if "LIBVIRT_LXC_UUID" in pid1env:
+ return True
+ except (IOError, OSError):
+ pass
+
+ # Detect OpenVZ containers
+ if os.path.isdir("/proc/vz") and not os.path.isdir("/proc/bc"):
+ return True
+
+ try:
+ # Detect Vserver containers
+ lines = load_file("/proc/self/status").splitlines()
+ for line in lines:
+ if line.startswith("VxID:"):
+ (_key, val) = line.strip().split(":", 1)
+ if val != "0":
+ return True
+ except (IOError, OSError):
+ pass
return False
+def get_proc_env(pid):
+ """
+ Return the environment in a dict that a given process id was started with.
+ """
+
+ env = {}
+ fn = os.path.join("/proc/", str(pid), "environ")
+ try:
+ contents = load_file(fn)
+ toks = contents.split("\0")
+ for tok in toks:
+ if tok == "":
+ continue
+ (name, val) = tok.split("=", 1)
+ if name:
+ env[name] = val
+ except (IOError, OSError):
+ pass
+ return env
+
+
def keyval_str_to_dict(kvstring):
ret = {}
for tok in kvstring.split():
@@ -851,5 +1396,4 @@ def keyval_str_to_dict(kvstring):
key = tok
val = True
ret[key] = val
-
- return(ret)
+ return ret
diff --git a/cloudinit/version.py b/cloudinit/version.py
new file mode 100644
index 00000000..4599910c
--- /dev/null
+++ b/cloudinit/version.py
@@ -0,0 +1,27 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from distutils import version as vr
+
+
+def version():
+ return vr.StrictVersion("0.7.0")
+
+
+def version_string():
+ return str(version())
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 3688f88d..5dae4047 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -1,8 +1,24 @@
+# The top level settings are used as module
+# and system configuration.
+
+# This user will have its password adjusted
user: ubuntu
-disable_root: 1
-preserve_hostname: False
-# datasource_list: ["NoCloud", "ConfigDrive", "OVF", "MAAS", "Ec2", "CloudStack"]
+# If this is set, 'root' will not be able to ssh in and they
+# will get a message to login instead as the above $user (ubuntu)
+disable_root: true
+
+# This will cause the set+update hostname module to not operate (if true)
+preserve_hostname: false
+
+# Example datasource config
+# datasource:
+# Ec2:
+# metadata_urls: [ 'blah.com' ]
+# timeout: 5 # (defaults to 50 seconds)
+# max_wait: 10 # (defaults to 120 seconds)
+
+# The modules that run in the 'init' stage
cloud_init_modules:
- bootcmd
- resizefs
@@ -13,6 +29,7 @@ cloud_init_modules:
- rsyslog
- ssh
+# The modules that run in the 'config' stage
cloud_config_modules:
- mounts
- ssh-import-id
@@ -31,6 +48,7 @@ cloud_config_modules:
- runcmd
- byobu
+# The modules that run in the 'final' stage
cloud_final_modules:
- rightscale_userdata
- scripts-per-once
@@ -40,3 +58,17 @@ cloud_final_modules:
- keys-to-console
- phone-home
- final-message
+
+# System and/or distro specific settings
+# (not accessible to handlers/transforms)
+system_info:
+ # This will affect which distro class gets used
+ distro: ubuntu
+ # Other config here will be given to the distro class and/or path classes
+ paths:
+ cloud_dir: /var/lib/cloud/
+ templates_dir: /etc/cloud/templates/
+ upstart_dir: /etc/init/
+ package_mirror: http://archive.ubuntu.com/ubuntu
+ availability_zone_template: http://%(zone)s.ec2.archive.ubuntu.com/ubuntu/
+ ssh_svcname: ssh
diff --git a/config/cloud.cfg.d/05_logging.cfg b/config/cloud.cfg.d/05_logging.cfg
index 2e7ac2ed..410a0650 100644
--- a/config/cloud.cfg.d/05_logging.cfg
+++ b/config/cloud.cfg.d/05_logging.cfg
@@ -1,4 +1,4 @@
-## this yaml formated config file handles setting
+## This yaml formatted config file handles setting
## logger information. The values that are necessary to be set
## are seen at the bottom. The top '_log' are only used to remove
## redundency in a syslog and fallback-to-file case.
@@ -53,5 +53,9 @@ _log:
args=("/dev/log", handlers.SysLogHandler.LOG_USER)
log_cfgs:
+# These will be joined into a string that defines the configuration
- [ *log_base, *log_syslog ]
+# These will be joined into a string that defines the configuration
- [ *log_base, *log_file ]
+# A file path can also be used
+# - /etc/log.conf
diff --git a/debian.trunk/rules b/debian.trunk/rules
deleted file mode 100755
index 19384687..00000000
--- a/debian.trunk/rules
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/make -f
-
-DEB_PYTHON2_MODULE_PACKAGES = cloud-init
-
-binary-install/cloud-init::cloud-init-fixups
-
-include /usr/share/cdbs/1/rules/debhelper.mk
-include /usr/share/cdbs/1/class/python-distutils.mk
-
-DEB_DH_INSTALL_SOURCEDIR := debian/tmp
-
-cloud-init-fixups:
- for x in $(DEB_DESTDIR)/usr/bin/*.py; do mv "$$x" "$${x%.py}"; done
- install -d $(DEB_DESTDIR)/etc/rsyslog.d
- cp tools/21-cloudinit.conf $(DEB_DESTDIR)/etc/rsyslog.d/21-cloudinit.conf
- ln -sf cloud-init-per $(DEB_DESTDIR)/usr/bin/cloud-init-run-module
-
-# You only need to run this immediately after checking out the package from
-# revision control.
-# http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=572204
-quilt-setup:
- @[ ! -d .pc ] || { echo ".pc exists. remove it and re-run to start fresh"; exit 1; }
- set -e; for patch in $$(quilt series | tac); do \
- patch -p1 -R --no-backup-if-mismatch <"debian/patches/$$patch"; \
- done
- quilt push -a
-
-.PHONY: quilt-setup
-
diff --git a/install.sh b/install.sh
deleted file mode 100755
index e7521bfa..00000000
--- a/install.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-
-# cd $(DEB_SRCDIR) && $(call cdbs_python_binary,python$(cdbs_python_compile_version)) $(DEB_PYTHON_SETUP_CMD) install --root=$(cdbs_python_destdir) $(DEB_PYTHON_INSTALL_ARGS_ALL)
-# for ddir in $(cdbs_python_destdir)/usr/lib/python?.?/dist-packages; do \
-# [ -d $$ddir ] || continue; \
-# sdir=$$(dirname $$ddir)/site-packages; \
-# mkdir -p $$sdir; \
-# tar -c -f - -C $$ddir . | tar -x -f - -C $$sdir; \
-# rm -rf $$ddir; \
-# done
-
-DEB_PYTHON_INSTALL_ARGS_ALL="-O0 --install-layout=deb"
-rm -Rf build
-
-destdir=$(readlink -f ${1})
-[ -z "${destdir}" ] && { echo "give destdir"; exit 1; }
-cd $(dirname ${0})
-./setup.py install --root=${destdir} ${DEB_PYTHON_INSTALL_ARGS_ALL}
-
-#mkdir -p ${destdir}/usr/share/pyshared
-#for x in ${destdir}/usr/lib/python2.6/dist-packages/*; do
-# [ -d "$x" ] || continue
-# [ ! -d "${destdir}/usr/share/pyshared/${x##*/}" ] ||
-# rm -Rf "${destdir}/usr/share/pyshared/${x##*/}"
-# mv $x ${destdir}/usr/share/pyshared
-#done
-#rm -Rf ${destdir}/usr/lib/python2.6
-
-for x in "${destdir}/usr/bin/"*.py; do
- [ -f "${x}" ] && mv "${x}" "${x%.py}"
-done
diff --git a/packages/bddeb b/packages/bddeb
new file mode 100755
index 00000000..10ad08b3
--- /dev/null
+++ b/packages/bddeb
@@ -0,0 +1,172 @@
+#!/usr/bin/python
+
+import os
+import shutil
+import sys
+import glob
+
+# Use the util functions from cloudinit
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
+ sys.argv[0]), os.pardir, os.pardir))
+if os.path.exists(os.path.join(possible_topdir, "cloudinit", "__init__.py")):
+ sys.path.insert(0, possible_topdir)
+
+from cloudinit import templater
+from cloudinit import util
+
+import argparse
+
+# Package names that will showup in requires to what we can actually
+# use in our debian 'control' file
+PKG_MP = {
+ 'tempita': 'python-tempita',
+ 'boto': 'python-boto',
+ 'configobj': 'python-configobj',
+ 'oauth': 'python-oauth',
+ 'yaml': 'python-yaml',
+ 'prettytable': 'python-prettytable',
+ 'argparse': 'python-argparse',
+}
+
+
+def write_debian_folder(root, version, revno, init_sys):
+ deb_dir = util.abs_join(root, 'debian')
+ os.makedirs(deb_dir)
+
+ # Fill in the change log template
+ templater.render_to_file(util.abs_join('debian', 'changelog'),
+ util.abs_join(deb_dir, 'changelog'),
+ params={
+ 'version': version,
+ 'revision': revno,
+ })
+
+ # Write out the control file template
+ cmd = [sys.executable,
+ util.abs_join(os.pardir, 'tools', 'read-dependencies')]
+ (stdout, _stderr) = util.subp(cmd)
+
+ # Map to known packages
+ pkgs = [p.lower().strip() for p in stdout.splitlines()]
+ requires = []
+ for p in pkgs:
+ tgt_pkg = None
+ for name in PKG_MP.keys():
+ if p.find(name) != -1:
+ tgt_pkg = PKG_MP.get(name)
+ break
+ if not tgt_pkg:
+ raise RuntimeError(("Do not know how to translate %s to "
+ " a known package") % (p))
+ else:
+ requires.append(tgt_pkg)
+
+ templater.render_to_file(util.abs_join('debian', 'control'),
+ util.abs_join(deb_dir, 'control'),
+ params={'requires': requires})
+
+ templater.render_to_file(util.abs_join('debian', 'rules'),
+ util.abs_join(deb_dir, 'rules'),
+ params={'init_sys': init_sys})
+
+ # Just copy the following directly
+ for base_fn in ['dirs', 'copyright', 'compat', 'pycompat']:
+ shutil.copy(util.abs_join('debian', base_fn),
+ util.abs_join(deb_dir, base_fn))
+
+
+def main():
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-n", "--no-sign", dest="sign",
+ help=("attempt to sign "
+ "the package (default: %(default)s)"),
+ default=True,
+ action='store_false')
+ parser.add_argument("-v", "--verbose", dest="verbose",
+ help=("run verbosely"
+ " (default: %(default)s)"),
+ default=False,
+ action='store_true')
+ parser.add_argument("-b", "--boot", dest="boot",
+ help="select boot type (default: %(default)s)",
+ metavar="TYPE", default='upstart',
+ choices=('upstart', 'upstart-local'))
+ args = parser.parse_args()
+
+ capture = True
+ if args.verbose:
+ capture = False
+
+ with util.tempdir() as tdir:
+
+ cmd = [sys.executable,
+ util.abs_join(os.pardir, 'tools', 'read-version')]
+ (sysout, _stderr) = util.subp(cmd)
+ version = sysout.strip()
+
+ cmd = ['bzr', 'revno']
+ (sysout, _stderr) = util.subp(cmd)
+ revno = sysout.strip()
+
+ # This is really only a temporary archive
+ # since we will extract it then add in the debian
+ # folder, then re-archive it for debian happiness
+ print("Creating a temporary tarball using the 'make-tarball' helper")
+ cmd = [sys.executable,
+ util.abs_join(os.getcwd(), 'make-tarball')]
+ (sysout, _stderr) = util.subp(cmd)
+ arch_fn = sysout.strip()
+ tmp_arch_fn = util.abs_join(tdir, os.path.basename(arch_fn))
+ shutil.move(arch_fn, tmp_arch_fn)
+
+ print("Extracting temporary tarball %r" % (tmp_arch_fn))
+ cmd = ['tar', '-xvzf', tmp_arch_fn, '-C', tdir]
+ util.subp(cmd, capture=capture)
+ base_name = os.path.basename(arch_fn)[:-len(".tar.gz")]
+ shutil.move(util.abs_join(tdir, base_name),
+ util.abs_join(tdir, 'cloud-init'))
+
+ print("Creating a debian/ folder in %r" %
+ (util.abs_join(tdir, 'cloud-init')))
+ write_debian_folder(util.abs_join(tdir, 'cloud-init'),
+ version, revno, args.boot)
+
+ # The naming here seems to follow some debian standard
+ # so it will whine if it is changed...
+ tar_fn = "cloud-init_%s~%s.orig.tar.gz" % (version, revno)
+ print("Archiving that new folder into %r" % (tar_fn))
+ cmd = ['tar', '-czvf',
+ util.abs_join(tdir, tar_fn),
+ '-C', util.abs_join(tdir, 'cloud-init')]
+ cmd.extend(os.listdir(util.abs_join(tdir, 'cloud-init')))
+ util.subp(cmd, capture=capture)
+ shutil.copy(util.abs_join(tdir, tar_fn), tar_fn)
+ print("Wrote out archive %r" % (util.abs_join(tar_fn)))
+
+ print("Running 'debuild' in %r" % (util.abs_join(tdir, 'cloud-init')))
+ with util.chdir(util.abs_join(tdir, 'cloud-init')):
+ cmd = ['debuild']
+ if not args.sign:
+ cmd.extend(['-us', '-uc'])
+ util.subp(cmd, capture=capture)
+
+ globs = []
+ globs.extend(glob.glob("%s/*.deb" %
+ (os.path.join(tdir))))
+ link_fn = os.path.join(os.getcwd(), 'cloud-init_all.deb')
+ for fn in globs:
+ base_fn = os.path.basename(fn)
+ shutil.move(fn, base_fn)
+ print("Wrote out debian package %r" % (base_fn))
+ if fn.endswith('_all.deb'):
+ # Add in the local link
+ util.del_file(link_fn)
+ os.symlink(base_fn, link_fn)
+ print("Linked %r to %r" % (base_fn, link_fn))
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/packages/brpm b/packages/brpm
new file mode 100755
index 00000000..1d05bd2a
--- /dev/null
+++ b/packages/brpm
@@ -0,0 +1,216 @@
+#!/usr/bin/python
+
+import contextlib
+import glob
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+import re
+
+import argparse
+
+# Use the util functions from cloudinit
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
+ sys.argv[0]), os.pardir, os.pardir))
+if os.path.exists(os.path.join(possible_topdir, "cloudinit", "__init__.py")):
+ sys.path.insert(0, possible_topdir)
+
+from cloudinit import templater
+from cloudinit import util
+
+from datetime import datetime
+
+
+# Mapping of expected packages to their full package names...
+PKG_MP = {
+ 'boto': 'python-boto',
+ 'tempita': 'python-tempita',
+ 'prettytable': 'python-prettytable',
+ 'oauth': 'python-oauth',
+ 'configobj': 'python-configobj',
+ 'yaml': 'PyYAML',
+ 'argparse': 'python-argparse'
+}
+
+
+def get_log_header(version):
+ # Try to find the version in the tags output
+ cmd = ['bzr', 'tags']
+ (stdout, _stderr) = util.subp(cmd)
+ a_rev = None
+ for t in stdout.splitlines():
+ ver, rev = t.split(None)
+ if ver == version:
+ a_rev = rev
+ break
+ if not a_rev:
+ return format_change_line(datetime.now(),
+ '??', version)
+
+ # Extract who made that tag as the header
+ cmd = ['bzr', 'log', '-r%s' % (a_rev), '--timezone=utc']
+ (stdout, _stderr) = util.subp(cmd)
+ kvs = {
+ 'comment': version,
+ }
+
+ for line in stdout.splitlines():
+ if line.startswith('committer:'):
+ kvs['who'] = line[len('committer:'):].strip()
+ if line.startswith('timestamp:'):
+ ts = line[len('timestamp:'):]
+ ts = ts.strip()
+ # http://bugs.python.org/issue6641
+ ts = ts.replace("+0000", '').strip()
+ ds = datetime.strptime(ts, '%a %Y-%m-%d %H:%M:%S')
+ kvs['ds'] = ds
+
+ return format_change_line(**kvs)
+
+
+def format_change_line(ds, who, comment=None):
+ # Rpmbuild seems to be pretty strict about the date format
+ d = ds.strftime("%a %b %d %Y")
+ d += " - %s" % (who)
+ if comment:
+ d += " - %s" % (comment)
+ return "* %s" % (d)
+
+
+def generate_spec_contents(args, tmpl_fn):
+
+ # Figure out the version and revno
+ cmd = [sys.executable,
+ util.abs_join(os.pardir, 'tools', 'read-version')]
+ (stdout, _stderr) = util.subp(cmd)
+ version = stdout.strip()
+
+ cmd = ['bzr', 'revno']
+ (stdout, _stderr) = util.subp(cmd)
+ revno = stdout.strip()
+
+ # Tmpl params
+ subs = {}
+ subs['version'] = version
+ subs['revno'] = revno
+ subs['release'] = revno
+ subs['archive_name'] = '%{name}-%{version}-' + revno + '.tar.gz'
+ subs['bd_requires'] = ['python-devel', 'python-setuptools']
+
+ cmd = [sys.executable,
+ util.abs_join(os.pardir, 'tools', 'read-dependencies')]
+ (stdout, _stderr) = util.subp(cmd)
+
+    # Normalize the detected dependency names
+ pkgs = [p.lower().strip() for p in stdout.splitlines()]
+
+ # Map to known packages
+ requires = []
+ for p in pkgs:
+ tgt_pkg = None
+ for name in PKG_MP.keys():
+ if p.find(name) != -1:
+ tgt_pkg = PKG_MP.get(name)
+ break
+ if not tgt_pkg:
+ raise RuntimeError(("Do not know how to translate %s to "
+ " a known package") % (p))
+ else:
+ requires.append(tgt_pkg)
+ subs['requires'] = requires
+
+ # Format a nice changelog (as best as we can)
+ changelog = util.load_file(util.abs_join(os.pardir, 'ChangeLog'))
+ changelog_lines = []
+ for line in changelog.splitlines():
+ if not line.strip():
+ continue
+ if re.match(r"^\s*[\d][.][\d][.][\d]:\s*", line):
+ line = line.strip(":")
+ header = get_log_header(line)
+ changelog_lines.append(header)
+ else:
+ changelog_lines.append(line)
+ subs['changelog'] = "\n".join(changelog_lines)
+
+ if args.boot == 'initd':
+ subs['init_d'] = True
+ subs['init_d_local'] = False
+ elif args.boot == 'initd-local':
+ subs['init_d'] = True
+ subs['init_d_local'] = True
+ else:
+ subs['init_d'] = False
+ subs['init_d_local'] = False
+
+ if args.boot == 'systemd':
+ subs['systemd'] = True
+ else:
+ subs['systemd'] = False
+
+ subs['init_sys'] = args.boot
+ return templater.render_from_file(tmpl_fn, params=subs)
+
+
+def main():
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-b", "--boot", dest="boot",
+ help="select boot type (default: %(default)s)",
+ metavar="TYPE", default='initd',
+ choices=('initd', 'systemd', 'initd-local'))
+ parser.add_argument("-v", "--verbose", dest="verbose",
+ help=("run verbosely"
+ " (default: %(default)s)"),
+ default=False,
+ action='store_true')
+ args = parser.parse_args()
+ capture = True
+ if args.verbose:
+ capture = False
+
+ # Clean out the root dir and make sure the dirs we want are in place
+ root_dir = os.path.expanduser("~/rpmbuild")
+ if os.path.isdir(root_dir):
+ shutil.rmtree(root_dir)
+ arc_dir = util.abs_join(root_dir, 'SOURCES')
+ util.ensure_dirs([root_dir, arc_dir])
+
+ # Archive the code
+ cmd = [sys.executable,
+ util.abs_join(os.getcwd(), 'make-tarball')]
+ (stdout, _stderr) = util.subp(cmd)
+ archive_fn = stdout.strip()
+ real_archive_fn = os.path.join(arc_dir, os.path.basename(archive_fn))
+ shutil.move(archive_fn, real_archive_fn)
+
+ # Form the spec file to be used
+ tmpl_fn = util.abs_join(os.getcwd(), 'redhat', 'cloud-init.spec')
+ contents = generate_spec_contents(args, tmpl_fn)
+ spec_fn = os.path.join(root_dir, 'cloud-init.spec')
+ util.write_file(spec_fn, contents)
+
+ # Now build it!
+ cmd = ['rpmbuild', '-ba', spec_fn]
+ util.subp(cmd, capture=capture)
+
+ # Copy the items built to our local dir
+ globs = []
+ globs.extend(glob.glob("%s/*.rpm" %
+ (os.path.join(root_dir, 'RPMS', 'noarch'))))
+ globs.extend(glob.glob("%s/*.rpm" %
+ (os.path.join(root_dir, 'RPMS'))))
+ globs.extend(glob.glob("%s/*.rpm" %
+ (os.path.join(root_dir, 'SRPMS'))))
+ for rpm_fn in globs:
+ tgt_fn = util.abs_join(os.getcwd(), os.path.basename(rpm_fn))
+ shutil.move(rpm_fn, tgt_fn)
+ print(tgt_fn)
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
diff --git a/debian.trunk/changelog b/packages/debian/changelog
index 53e3678c..ac5bcf98 100644
--- a/debian.trunk/changelog
+++ b/packages/debian/changelog
@@ -1,4 +1,4 @@
-cloud-init (VERSION~REVNO-0) UNRELEASED; urgency=low
+cloud-init ({{version}}~{{revision}}-1) UNRELEASED; urgency=low
* build
diff --git a/debian.trunk/compat b/packages/debian/compat
index 7ed6ff82..7ed6ff82 100644
--- a/debian.trunk/compat
+++ b/packages/debian/compat
diff --git a/debian.trunk/control b/packages/debian/control
index f2eec1e4..e00901af 100644
--- a/debian.trunk/control
+++ b/packages/debian/control
@@ -10,19 +10,17 @@ Build-Depends: cdbs,
pylint,
python-mocker,
XS-Python-Version: all
-Standards-Version: 3.9.1
+Standards-Version: 3.9.3
Package: cloud-init
Architecture: all
Depends: cloud-utils,
procps,
python,
- python-boto (>=2.0),
- python-cheetah,
- python-configobj,
- python-oauth,
+{{for r in requires}}
+ {{r}},
+{{endfor}}
python-software-properties,
- python-yaml,
${misc:Depends},
${python:Depends}
XB-Python-Version: ${python:Versions}
diff --git a/debian.trunk/copyright b/packages/debian/copyright
index dc993525..dc993525 100644
--- a/debian.trunk/copyright
+++ b/packages/debian/copyright
diff --git a/debian.trunk/dirs b/packages/debian/dirs
index f3de468d..f3de468d 100644
--- a/debian.trunk/dirs
+++ b/packages/debian/dirs
diff --git a/debian.trunk/pycompat b/packages/debian/pycompat
index 0cfbf088..0cfbf088 100644
--- a/debian.trunk/pycompat
+++ b/packages/debian/pycompat
diff --git a/packages/debian/rules b/packages/debian/rules
new file mode 100755
index 00000000..87cd6538
--- /dev/null
+++ b/packages/debian/rules
@@ -0,0 +1,17 @@
+#!/usr/bin/make -f
+
+DEB_PYTHON2_MODULE_PACKAGES = cloud-init
+
+binary-install/cloud-init::cloud-init-fixups
+
+include /usr/share/cdbs/1/rules/debhelper.mk
+include /usr/share/cdbs/1/class/python-distutils.mk
+
+DEB_PYTHON_INSTALL_ARGS_ALL += --init-system={{init_sys}}
+
+DEB_DH_INSTALL_SOURCEDIR := debian/tmp
+
+cloud-init-fixups:
+ install -d $(DEB_DESTDIR)/etc/rsyslog.d
+ cp tools/21-cloudinit.conf $(DEB_DESTDIR)/etc/rsyslog.d/21-cloudinit.conf
+
diff --git a/tools/make-dist-tarball b/packages/make-dist-tarball
index d6d53aa7..622283bd 100755
--- a/tools/make-dist-tarball
+++ b/packages/make-dist-tarball
@@ -9,7 +9,7 @@ Usage: ${0##*/} version
EOF
}
-topdir=$PWD
+topdir="${PWD%/*}"
tag=${1}
[ -n "$tag" ] || { Usage 1>&2 ; exit 1; }
@@ -22,4 +22,4 @@ out=${topdir}/cloud-init-${tag}.tar.gz
cd ${tmpd} &&
bzr branch -r "tag:${tag}" "${topdir}" ./cloud-init-${tag} &&
tar czf "${out}" cloud-init-${tag}/ --exclude cloud-init-${tag}/.bzr &&
- echo "wrote ${out}"
+ echo "Wrote ${out}"
diff --git a/packages/make-tarball b/packages/make-tarball
new file mode 100755
index 00000000..43a6fc33
--- /dev/null
+++ b/packages/make-tarball
@@ -0,0 +1,89 @@
+#!/usr/bin/python
+
+import contextlib
+import os
+import shutil
+import subprocess
+import sys
+import tempfile
+
+import optparse
+
+
+# Use the util functions from cloudinit
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
+ sys.argv[0]), os.pardir, os.pardir))
+if os.path.exists(os.path.join(possible_topdir, "cloudinit", "__init__.py")):
+ sys.path.insert(0, possible_topdir)
+
+from cloudinit import util
+
+
+def find_versioned_files():
+ (stdout, _stderr) = util.subp(['bzr', 'ls', '--versioned', '--recursive'])
+ fns = [fn for fn in stdout.splitlines()
+ if fn and not fn.startswith('.')]
+ fns.sort()
+ return fns
+
+
+def copy(fn, where_to, verbose):
+ if verbose:
+ print("Copying %r --> %r" % (fn, where_to))
+ if os.path.isfile(fn):
+ shutil.copy(fn, where_to)
+ elif os.path.isdir(fn) and not os.path.isdir(where_to):
+ os.makedirs(where_to)
+ else:
+ raise RuntimeError("Do not know how to copy %s" % (fn))
+
+
+def main():
+
+ parser = optparse.OptionParser()
+ parser.add_option("-f", "--file", dest="filename",
+ help="write archive to FILE", metavar="FILE")
+ parser.add_option("-v", "--verbose",
+ action="store_true", dest="verbose", default=False,
+ help="show verbose messaging")
+
+ (options, args) = parser.parse_args()
+
+ base_fn = options.filename
+ if not base_fn:
+ (stdout, _stderr) = util.subp(['bzr', 'revno'])
+ revno = stdout.strip()
+ cmd = [sys.executable,
+ util.abs_join(os.pardir, 'tools', 'read-version')]
+ (stdout, _stderr) = util.subp(cmd)
+ version = stdout.strip()
+ base_fn = 'cloud-init-%s-%s' % (version, revno)
+
+ with util.tempdir() as tdir:
+ util.ensure_dir(util.abs_join(tdir, base_fn))
+ arch_fn = '%s.tar.gz' % (base_fn)
+
+ with util.chdir(os.pardir):
+ fns = find_versioned_files()
+ for fn in fns:
+ copy(fn, util.abs_join(tdir, base_fn, fn),
+ verbose=options.verbose)
+
+ arch_full_fn = util.abs_join(tdir, arch_fn)
+ cmd = ['tar', '-czvf', arch_full_fn, '-C', tdir, base_fn]
+ if options.verbose:
+ print("Creating an archive from directory %r to %r" %
+ (util.abs_join(tdir, base_fn), arch_full_fn))
+
+ util.subp(cmd, capture=(not options.verbose))
+ shutil.move(util.abs_join(tdir, arch_fn),
+ util.abs_join(os.getcwd(), arch_fn))
+
+ print(os.path.abspath(arch_fn))
+
+ return 0
+
+
+if __name__ == '__main__':
+ sys.exit(main())
+
diff --git a/packages/redhat/cloud-init.spec b/packages/redhat/cloud-init.spec
new file mode 100644
index 00000000..d0f83a4b
--- /dev/null
+++ b/packages/redhat/cloud-init.spec
@@ -0,0 +1,183 @@
+%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print get_python_lib()")}
+
+# See: http://www.zarb.org/~jasonc/macros.php
+# Or: http://fedoraproject.org/wiki/Packaging:ScriptletSnippets
+# Or: http://www.rpm.org/max-rpm/ch-rpm-inside.html
+
+Name: cloud-init
+Version: {{version}}
+Release: {{release}}%{?dist}
+Summary: Cloud instance init scripts
+
+Group: System Environment/Base
+License: GPLv3
+URL: http://launchpad.net/cloud-init
+
+Source0: {{archive_name}}
+BuildArch: noarch
+BuildRoot: %{_tmppath}
+
+BuildRequires: python-devel
+BuildRequires: python-setuptools
+
+# System util packages needed
+Requires: shadow-utils
+Requires: rsyslog
+Requires: iproute
+Requires: e2fsprogs
+Requires: net-tools
+Requires: procps
+Requires: shadow-utils
+
+# Install pypi 'dynamic' requirements
+{{for r in requires}}
+Requires: {{r}}
+{{endfor}}
+
+{{if init_d}}
+Requires(post): chkconfig
+Requires(postun): initscripts
+Requires(preun): chkconfig
+Requires(preun): initscripts
+{{endif}}
+
+{{if systemd}}
+BuildRequires: systemd-units
+Requires(post): systemd-units
+Requires(postun): systemd-units
+Requires(preun): systemd-units
+{{endif}}
+
+%description
+Cloud-init is a set of init scripts for cloud instances. Cloud instances
+need special scripts to run during initialization to retrieve and install
+ssh keys and to let the user run various scripts.
+
+%prep
+%setup -q -n %{name}-%{version}-{{revno}}
+
+%build
+%{__python} setup.py build
+
+%install
+rm -rf $RPM_BUILD_ROOT
+%{__python} setup.py install -O1 \
+ --skip-build --root $RPM_BUILD_ROOT \
+ --init-system={{init_sys}}
+
+# Note that /etc/rsyslog.d didn't exist by default until F15.
+# el6 request: https://bugzilla.redhat.com/show_bug.cgi?id=740420
+mkdir -p $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d
+cp -p tools/21-cloudinit.conf \
+ $RPM_BUILD_ROOT/%{_sysconfdir}/rsyslog.d/21-cloudinit.conf
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%post
+
+{{if systemd}}
+if [ $1 -eq 1 ]
+then
+ /bin/systemctl enable cloud-config.service >/dev/null 2>&1 || :
+ /bin/systemctl enable cloud-final.service >/dev/null 2>&1 || :
+ /bin/systemctl enable cloud-init.service >/dev/null 2>&1 || :
+ /bin/systemctl enable cloud-init-local.service >/dev/null 2>&1 || :
+fi
+{{endif}}
+
+{{if init_d_local}}
+/sbin/chkconfig --add %{_initrddir}/cloud-init-local
+{{elif init_d}}
+/sbin/chkconfig --add %{_initrddir}/cloud-init
+{{endif}}
+{{if init_d}}
+/sbin/chkconfig --add %{_initrddir}/cloud-config
+/sbin/chkconfig --add %{_initrddir}/cloud-final
+{{endif}}
+
+%preun
+
+{{if init_d_local}}
+if [ $1 -eq 0 ]
+then
+ /sbin/service cloud-init-local stop >/dev/null 2>&1
+ /sbin/chkconfig --del cloud-init-local
+fi
+{{elif init_d}}
+if [ $1 -eq 0 ]
+then
+ /sbin/service cloud-init stop >/dev/null 2>&1
+ /sbin/chkconfig --del cloud-init
+fi
+{{endif}}
+{{if init_d}}
+if [ $1 -eq 0 ]
+then
+ /sbin/service cloud-config stop >/dev/null 2>&1
+ /sbin/chkconfig --del cloud-config
+ /sbin/service cloud-final stop >/dev/null 2>&1
+ /sbin/chkconfig --del cloud-final
+fi
+{{endif}}
+
+{{if systemd}}
+if [ $1 -eq 0 ]
+then
+ /bin/systemctl --no-reload disable cloud-config.service >/dev/null 2>&1 || :
+ /bin/systemctl --no-reload disable cloud-final.service >/dev/null 2>&1 || :
+ /bin/systemctl --no-reload disable cloud-init.service >/dev/null 2>&1 || :
+ /bin/systemctl --no-reload disable cloud-init-local.service >/dev/null 2>&1 || :
+fi
+{{endif}}
+
+%postun
+
+{{if systemd}}
+/bin/systemctl daemon-reload >/dev/null 2>&1 || :
+{{endif}}
+
+%files
+
+{{if init_d}}
+%attr(0755, root, root) %{_initddir}/cloud-config
+%attr(0755, root, root) %{_initddir}/cloud-final
+{{endif}}
+{{if init_d_local}}
+%attr(0755, root, root) %{_initddir}/cloud-init-local
+{{elif init_d}}
+%attr(0755, root, root) %{_initddir}/cloud-init
+{{endif}}
+
+{{if systemd}}
+%{_unitdir}/cloud-*
+{{endif}}
+
+# Program binaries
+%{_bindir}/cloud-init*
+
+# There doesn't seem to be an agreed-upon place for these;
+# the standard appears to say /usr/lib, but rpmbuild
+# may try /usr/lib64 instead, hence the literal path ??
+/usr/lib/%{name}/uncloud-init
+/usr/lib/%{name}/write-ssh-key-fingerprints
+
+# Docs
+%doc TODO LICENSE ChangeLog Requires
+%doc %{_defaultdocdir}/cloud-init/*
+
+# Configs
+%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg
+%dir %{_sysconfdir}/cloud/cloud.cfg.d
+%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/*.cfg
+%config(noreplace) %{_sysconfdir}/cloud/cloud.cfg.d/README
+%dir %{_sysconfdir}/cloud/templates
+%config(noreplace) %{_sysconfdir}/cloud/templates/*
+%config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf
+
+# Python code is here...
+%{python_sitelib}/*
+
+%changelog
+
+{{changelog}}
diff --git a/setup.py b/setup.py
index f32662b8..06b897a5 100755
--- a/setup.py
+++ b/setup.py
@@ -1,10 +1,12 @@
-#!/usr/bin/python
# vi: ts=4 expandtab
#
# Distutils magic for ec2-init
+#
# Copyright (C) 2009 Canonical Ltd.
+# Copyright (C) 2012 Yahoo! Inc.
#
# Author: Soren Hansen <soren@canonical.com>
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
@@ -17,36 +19,119 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-#
-from distutils.core import setup
+
from glob import glob
-import os.path
+
+import os
+import re
+
+import setuptools
+from setuptools.command.install import install
+
+from distutils.command.install_data import install_data
+from distutils.errors import DistutilsArgError
+
import subprocess
+
def is_f(p):
- return(os.path.isfile(p))
+ return os.path.isfile(p)
+
+
+INITSYS_FILES = {
+ 'sysvinit': filter((lambda x: is_f(x)), glob('sysvinit/*')),
+ 'systemd': filter((lambda x: is_f(x)), glob('systemd/*')),
+ 'upstart': filter((lambda x: is_f(x)), glob('upstart/*')),
+}
+INITSYS_ROOTS = {
+ 'sysvinit': '/etc/rc.d/init.d',
+ 'systemd': '/etc/systemd/system/',
+ 'upstart': '/etc/init/',
+}
+INITSYS_TYPES = sorted(list(INITSYS_ROOTS.keys()))
+
+
+def tiny_p(cmd, capture=True):
+ # Darn python 2.6 doesn't have check_output (argggg)
+ stdout = subprocess.PIPE
+ stderr = subprocess.PIPE
+ if not capture:
+ stdout = None
+ stderr = None
+ sp = subprocess.Popen(cmd, stdout=stdout,
+ stderr=stderr, stdin=None)
+ (out, err) = sp.communicate()
+ if sp.returncode not in [0]:
+ raise RuntimeError("Failed running %s [rc=%s] (%s, %s)"
+ % (cmd, sp.returncode, out, err))
+ return (out, err)
+
+
+def get_version():
+ cmd = ['tools/read-version']
+ (ver, _e) = tiny_p(cmd)
+ return ver.strip()
+
+
+def read_requires():
+ cmd = ['tools/read-dependencies']
+ (deps, _e) = tiny_p(cmd)
+ return deps.splitlines()
+
+
+# TODO: Is there a better way to do this??
+class InitsysInstallData(install):
+ user_options = install.user_options + [
+        # This will magically show up in member variable 'init_system'
+ ('init-system=', None,
+ ('init system to configure (%s) [default: None]') %
+ (", ".join(INITSYS_TYPES))
+ ),
+ ]
+
+ def initialize_options(self):
+ install.initialize_options(self)
+ self.init_system = None
+
+ def finalize_options(self):
+ install.finalize_options(self)
+ if self.init_system and self.init_system not in INITSYS_TYPES:
+ raise DistutilsArgError(
+ ("You must specify one of (%s) when"
+ " specifying a init system!") % (", ".join(INITSYS_TYPES))
+ )
+ elif self.init_system:
+ self.distribution.data_files.append((INITSYS_ROOTS[self.init_system],
+ INITSYS_FILES[self.init_system]))
+        # Force that command to reinitialize (with new file list)
+ self.distribution.reinitialize_command('install_data', True)
+
-setup(name='cloud-init',
- version='0.6.3',
+setuptools.setup(name='cloud-init',
+ version=get_version(),
description='EC2 initialisation magic',
author='Scott Moser',
author_email='scott.moser@canonical.com',
url='http://launchpad.net/cloud-init/',
- packages=['cloudinit', 'cloudinit.CloudConfig' ],
- scripts=['cloud-init.py',
- 'cloud-init-cfg.py',
+ packages=setuptools.find_packages(exclude=['tests']),
+ scripts=['bin/cloud-init',
'tools/cloud-init-per',
],
+ license='GPLv3',
data_files=[('/etc/cloud', glob('config/*.cfg')),
('/etc/cloud/cloud.cfg.d', glob('config/cloud.cfg.d/*')),
('/etc/cloud/templates', glob('templates/*')),
- ('/etc/init', glob('upstart/*.conf')),
('/usr/share/cloud-init', []),
- ('/usr/lib/cloud-init',
+ ('/usr/lib/cloud-init',
['tools/uncloud-init', 'tools/write-ssh-key-fingerprints']),
- ('/usr/share/doc/cloud-init', filter(is_f,glob('doc/*'))),
- ('/usr/share/doc/cloud-init/examples', filter(is_f,glob('doc/examples/*'))),
- ('/usr/share/doc/cloud-init/examples/seed', filter(is_f,glob('doc/examples/seed/*'))),
- ('/etc/profile.d', ['tools/Z99-cloud-locale-test.sh']),
+ ('/usr/share/doc/cloud-init', filter(is_f, glob('doc/*'))),
+ ('/usr/share/doc/cloud-init/examples', filter(is_f, glob('doc/examples/*'))),
+ ('/usr/share/doc/cloud-init/examples/seed', filter(is_f, glob('doc/examples/seed/*'))),
],
+ install_requires=read_requires(),
+ cmdclass = {
+ # Use a subclass for install that handles
+ # adding on the right init system configuration files
+ 'install': InitsysInstallData,
+ },
)
diff --git a/sysvinit/cloud-config b/sysvinit/cloud-config
new file mode 100755
index 00000000..dd0bca8b
--- /dev/null
+++ b/sysvinit/cloud-config
@@ -0,0 +1,124 @@
+#!/bin/sh
+
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# See: http://wiki.debian.org/LSBInitScripts
+# See: http://tiny.cc/czvbgw
+# See: http://www.novell.com/coolsolutions/feature/15380.html
+# Also based on dhcpd in RHEL (for comparison)
+
+### BEGIN INIT INFO
+# Provides: cloud-config
+# Required-Start: cloud-init
+# Should-Start: $time
+# Required-Stop:
+# Should-Stop:
+# Default-Start: 3 5
+# Default-Stop:
+# Short-Description: The config cloud-init job
+# Description: Start cloud-init and runs the config phase
+# and any associated config modules as desired.
+### END INIT INFO
+
+. /etc/init.d/functions
+
+# Return values acc. to LSB for all commands but status:
+# 0 - success
+# 1 - generic or unspecified error
+# 2 - invalid or excess argument(s)
+# 3 - unimplemented feature (e.g. "reload")
+# 4 - user had insufficient privileges
+# 5 - program is not installed
+# 6 - program is not configured
+# 7 - program is not running
+# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl)
+#
+# Note that starting an already running service, stopping
+# or restarting a not-running service as well as the restart
+# with force-reload (in case signaling is not supported) are
+# considered a success.
+
+RETVAL=0
+
+prog="cloud-init"
+cloud_init="/usr/bin/cloud-init"
+conf="/etc/cloud/cloud.cfg"
+
+# If there exists a sysconfig variable override file use it...
+[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
+
+start() {
+ [ -x $cloud_init ] || return 5
+ [ -f $conf ] || return 6
+
+ echo -n $"Starting $prog: "
+ $cloud_init $CLOUDINITARGS modules --mode config
+ RETVAL=$?
+ return $RETVAL
+}
+
+stop() {
+ echo -n $"Shutting down $prog: "
+ # No-op
+ RETVAL=7
+ return $RETVAL
+}
+
+. /etc/init.d/functions
+
+case "$1" in
+ start)
+ start
+ RETVAL=$?
+ ;;
+ stop)
+ stop
+ RETVAL=$?
+ ;;
+ restart|try-restart|condrestart)
+ ## Stop the service and regardless of whether it was
+ ## running or not, start it again.
+ #
+ ## Note: try-restart is now part of LSB (as of 1.9).
+ ## RH has a similar command named condrestart.
+ start
+ RETVAL=$?
+ ;;
+ reload|force-reload)
+ # It does not support reload
+ RETVAL=3
+ ;;
+ status)
+ echo -n $"Checking for service $prog:"
+ # Return value is slightly different for the status command:
+ # 0 - service up and running
+ # 1 - service dead, but /var/run/ pid file exists
+ # 2 - service dead, but /var/lock/ lock file exists
+ # 3 - service not running (unused)
+ # 4 - service status unknown :-(
+ # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.)
+ RETVAL=3
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}"
+ RETVAL=3
+ ;;
+esac
+
+exit $RETVAL
diff --git a/sysvinit/cloud-final b/sysvinit/cloud-final
new file mode 100755
index 00000000..2e462c17
--- /dev/null
+++ b/sysvinit/cloud-final
@@ -0,0 +1,124 @@
+#!/bin/sh
+
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# See: http://wiki.debian.org/LSBInitScripts
+# See: http://tiny.cc/czvbgw
+# See: http://www.novell.com/coolsolutions/feature/15380.html
+# Also based on dhcpd in RHEL (for comparison)
+
+### BEGIN INIT INFO
+# Provides: cloud-final
+# Required-Start: $all cloud-init cloud-config
+# Should-Start: $time
+# Required-Stop:
+# Should-Stop:
+# Default-Start: 3 5
+# Default-Stop:
+# Short-Description: The final cloud-init job
+# Description: Start cloud-init and runs the final phase
+# and any associated final modules as desired.
+### END INIT INFO
+
+. /etc/init.d/functions
+
+# Return values acc. to LSB for all commands but status:
+# 0 - success
+# 1 - generic or unspecified error
+# 2 - invalid or excess argument(s)
+# 3 - unimplemented feature (e.g. "reload")
+# 4 - user had insufficient privileges
+# 5 - program is not installed
+# 6 - program is not configured
+# 7 - program is not running
+# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl)
+#
+# Note that starting an already running service, stopping
+# or restarting a not-running service as well as the restart
+# with force-reload (in case signaling is not supported) are
+# considered a success.
+
+RETVAL=0
+
+prog="cloud-init"
+cloud_init="/usr/bin/cloud-init"
+conf="/etc/cloud/cloud.cfg"
+
+# If there exists a sysconfig variable override file use it...
+[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
+
+start() {
+ [ -x $cloud_init ] || return 5
+ [ -f $conf ] || return 6
+
+ echo -n $"Starting $prog: "
+ $cloud_init $CLOUDINITARGS modules --mode final
+ RETVAL=$?
+ return $RETVAL
+}
+
+stop() {
+ echo -n $"Shutting down $prog: "
+ # No-op
+ RETVAL=7
+ return $RETVAL
+}
+
+. /etc/init.d/functions
+
+case "$1" in
+ start)
+ start
+ RETVAL=$?
+ ;;
+ stop)
+ stop
+ RETVAL=$?
+ ;;
+ restart|try-restart|condrestart)
+ ## Stop the service and regardless of whether it was
+ ## running or not, start it again.
+ #
+ ## Note: try-restart is now part of LSB (as of 1.9).
+ ## RH has a similar command named condrestart.
+ start
+ RETVAL=$?
+ ;;
+ reload|force-reload)
+ # It does not support reload
+ RETVAL=3
+ ;;
+ status)
+ echo -n $"Checking for service $prog:"
+ # Return value is slightly different for the status command:
+ # 0 - service up and running
+ # 1 - service dead, but /var/run/ pid file exists
+ # 2 - service dead, but /var/lock/ lock file exists
+ # 3 - service not running (unused)
+ # 4 - service status unknown :-(
+ # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.)
+ RETVAL=3
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}"
+ RETVAL=3
+ ;;
+esac
+
+exit $RETVAL
diff --git a/sysvinit/cloud-init b/sysvinit/cloud-init
new file mode 100755
index 00000000..7726c452
--- /dev/null
+++ b/sysvinit/cloud-init
@@ -0,0 +1,124 @@
+#!/bin/sh
+
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# See: http://wiki.debian.org/LSBInitScripts
+# See: http://tiny.cc/czvbgw
+# See: http://www.novell.com/coolsolutions/feature/15380.html
+# Also based on dhcpd in RHEL (for comparison)
+
+### BEGIN INIT INFO
+# Provides: cloud-init
+# Required-Start: $local_fs $network $named $remote_fs
+# Should-Start: $time
+# Required-Stop:
+# Should-Stop:
+# Default-Start: 3 5
+# Default-Stop:
+# Short-Description: The initial cloud-init job (net and fs contingent)
+# Description: Start cloud-init and runs the initialization phase
+# and any associated initial modules as desired.
+### END INIT INFO
+
+. /etc/init.d/functions
+
+# Return values acc. to LSB for all commands but status:
+# 0 - success
+# 1 - generic or unspecified error
+# 2 - invalid or excess argument(s)
+# 3 - unimplemented feature (e.g. "reload")
+# 4 - user had insufficient privileges
+# 5 - program is not installed
+# 6 - program is not configured
+# 7 - program is not running
+# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl)
+#
+# Note that starting an already running service, stopping
+# or restarting a not-running service as well as the restart
+# with force-reload (in case signaling is not supported) are
+# considered a success.
+
+RETVAL=0
+
+prog="cloud-init"
+cloud_init="/usr/bin/cloud-init"
+conf="/etc/cloud/cloud.cfg"
+
+# If there exists a sysconfig variable override file use it...
+[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
+
+start() {
+ [ -x $cloud_init ] || return 5
+ [ -f $conf ] || return 6
+
+ echo -n $"Starting $prog: "
+ $cloud_init $CLOUDINITARGS init
+ RETVAL=$?
+ return $RETVAL
+}
+
+stop() {
+ echo -n $"Shutting down $prog: "
+ # No-op
+ RETVAL=7
+ return $RETVAL
+}
+
+. /etc/init.d/functions
+
+case "$1" in
+ start)
+ start
+ RETVAL=$?
+ ;;
+ stop)
+ stop
+ RETVAL=$?
+ ;;
+ restart|try-restart|condrestart)
+ ## Stop the service and regardless of whether it was
+ ## running or not, start it again.
+ #
+ ## Note: try-restart is now part of LSB (as of 1.9).
+ ## RH has a similar command named condrestart.
+ start
+ RETVAL=$?
+ ;;
+ reload|force-reload)
+ # It does not support reload
+ RETVAL=3
+ ;;
+ status)
+ echo -n $"Checking for service $prog:"
+ # Return value is slightly different for the status command:
+ # 0 - service up and running
+ # 1 - service dead, but /var/run/ pid file exists
+ # 2 - service dead, but /var/lock/ lock file exists
+ # 3 - service not running (unused)
+ # 4 - service status unknown :-(
+ # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.)
+ RETVAL=3
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}"
+ RETVAL=3
+ ;;
+esac
+
+exit $RETVAL
diff --git a/sysvinit/cloud-init-local b/sysvinit/cloud-init-local
new file mode 100755
index 00000000..bf5d409a
--- /dev/null
+++ b/sysvinit/cloud-init-local
@@ -0,0 +1,124 @@
+#!/bin/sh
+
+#
+# Copyright (C) 2012 Yahoo! Inc.
+#
+# Author: Joshua Harlow <harlowja@yahoo-inc.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# See: http://wiki.debian.org/LSBInitScripts
+# See: http://tiny.cc/czvbgw
+# See: http://www.novell.com/coolsolutions/feature/15380.html
+# Also based on dhcpd in RHEL (for comparison)
+
+### BEGIN INIT INFO
+# Provides: cloud-init
+# Required-Start: $local_fs $remote_fs
+# Should-Start: $time
+# Required-Stop:
+# Should-Stop:
+# Default-Start: 3 5
+# Default-Stop:
+# Short-Description: The initial cloud-init job (local fs contingent)
+# Description:       Starts cloud-init and runs the initialization phases
+# and any associated initial modules as desired.
+### END INIT INFO
+
+. /etc/init.d/functions
+
+# Return values acc. to LSB for all commands but status:
+# 0 - success
+# 1 - generic or unspecified error
+# 2 - invalid or excess argument(s)
+# 3 - unimplemented feature (e.g. "reload")
+# 4 - user had insufficient privileges
+# 5 - program is not installed
+# 6 - program is not configured
+# 7 - program is not running
+# 8--199 - reserved (8--99 LSB, 100--149 distrib, 150--199 appl)
+#
+# Note that starting an already running service, stopping
+# or restarting a not-running service as well as the restart
+# with force-reload (in case signaling is not supported) are
+# considered a success.
+
+RETVAL=0
+
+prog="cloud-init-local"
+cloud_init="/usr/bin/cloud-init"
+conf="/etc/cloud/cloud.cfg"
+
+# If there exists a sysconfig variable override file use it...
+[ -f /etc/sysconfig/cloud-init ] && . /etc/sysconfig/cloud-init
+
+start() {
+ [ -x $cloud_init ] || return 5
+ [ -f $conf ] || return 6
+
+ echo -n $"Starting $prog: "
+ $cloud_init $CLOUDINITARGS init --local
+ RETVAL=$?
+ return $RETVAL
+}
+
+stop() {
+ echo -n $"Shutting down $prog: "
+ # No-op
+ RETVAL=7
+ return $RETVAL
+}
+
+# Note: /etc/init.d/functions is already sourced above.
+
+case "$1" in
+ start)
+ start
+ RETVAL=$?
+ ;;
+ stop)
+ stop
+ RETVAL=$?
+ ;;
+ restart|try-restart|condrestart)
+ ## Stop the service and regardless of whether it was
+ ## running or not, start it again.
+ #
+ ## Note: try-restart is now part of LSB (as of 1.9).
+ ## RH has a similar command named condrestart.
+ start
+ RETVAL=$?
+ ;;
+ reload|force-reload)
+ # It does not support reload
+ RETVAL=3
+ ;;
+ status)
+ echo -n $"Checking for service $prog:"
+ # Return value is slightly different for the status command:
+ # 0 - service up and running
+ # 1 - service dead, but /var/run/ pid file exists
+ # 2 - service dead, but /var/lock/ lock file exists
+ # 3 - service not running (unused)
+ # 4 - service status unknown :-(
+ # 5--199 reserved (5--99 LSB, 100--149 distro, 150--199 appl.)
+ RETVAL=3
+ ;;
+ *)
+ echo "Usage: $0 {start|stop|status|try-restart|condrestart|restart|force-reload|reload}"
+ RETVAL=3
+ ;;
+esac
+
+exit $RETVAL
diff --git a/templates/chef_client.rb.tmpl b/templates/chef_client.rb.tmpl
index d3d9a922..35123ced 100644
--- a/templates/chef_client.rb.tmpl
+++ b/templates/chef_client.rb.tmpl
@@ -1,12 +1,12 @@
log_level :info
log_location "/var/log/chef/client.log"
ssl_verify_mode :verify_none
-validation_client_name "$validation_name"
+validation_client_name "{{validation_name}}"
validation_key "/etc/chef/validation.pem"
client_key "/etc/chef/client.pem"
-chef_server_url "$server_url"
-environment "$environment"
-node_name "$node_name"
+chef_server_url "{{server_url}}"
+environment "{{environment}}"
+node_name "{{node_name}}"
json_attribs "/etc/chef/firstboot.json"
file_cache_path "/var/cache/chef"
file_backup_path "/var/backups/chef"
diff --git a/templates/default-locale.tmpl b/templates/default-locale.tmpl
deleted file mode 100644
index 7940672b..00000000
--- a/templates/default-locale.tmpl
+++ /dev/null
@@ -1 +0,0 @@
-LANG="$locale"
diff --git a/templates/hosts.redhat.tmpl b/templates/hosts.redhat.tmpl
new file mode 100644
index 00000000..cfc40668
--- /dev/null
+++ b/templates/hosts.redhat.tmpl
@@ -0,0 +1,22 @@
+{{# This file /etc/cloud/templates/hosts.redhat.tmpl is only utilized
+ if enabled in cloud-config. Specifically, in order to enable it
+ you need to add the following to config:
+ manage_etc_hosts: True}}
+#
+# Your system has configured 'manage_etc_hosts' as True.
+# As a result, if you wish for changes to this file to persist
+# then you will need to either
+# a.) make changes to the master file in /etc/cloud/templates/hosts.redhat.tmpl
+# b.) change or remove the value of 'manage_etc_hosts' in
+# /etc/cloud/cloud.cfg or cloud-config from user-data
+#
+# The following lines are desirable for IPv4 capable hosts
+127.0.0.1 {{fqdn}} {{hostname}}
+127.0.0.1 localhost.localdomain localhost
+127.0.0.1 localhost4.localdomain4 localhost4
+
+# The following lines are desirable for IPv6 capable hosts
+::1 {{fqdn}} {{hostname}}
+::1 localhost.localdomain localhost
+::1 localhost6.localdomain6 localhost6
+
diff --git a/templates/hosts.tmpl b/templates/hosts.ubuntu.tmpl
index ae120b02..9eebe971 100644
--- a/templates/hosts.tmpl
+++ b/templates/hosts.ubuntu.tmpl
@@ -1,9 +1,7 @@
-## This file (/etc/cloud/templates/hosts.tmpl) is only utilized
-## if enabled in cloud-config. Specifically, in order to enable it
-## you need to add the following to config:
-## manage_etc_hosts: True
-##
-## Note, double-hash commented lines will not appear in /etc/hosts
+{{# This file /etc/cloud/templates/hosts.tmpl is only utilized
+ if enabled in cloud-config. Specifically, in order to enable it
+ you need to add the following to config:
+ manage_etc_hosts: True}}
#
# Your system has configured 'manage_etc_hosts' as True.
# As a result, if you wish for changes to this file to persist
@@ -12,8 +10,8 @@
# b.) change or remove the value of 'manage_etc_hosts' in
# /etc/cloud/cloud.cfg or cloud-config from user-data
#
-## The value '$hostname' will be replaced with the local-hostname
-127.0.1.1 $fqdn $hostname
+# The following lines are desirable for IPv4 capable hosts
+127.0.1.1 {{fqdn}} {{hostname}}
127.0.0.1 localhost
# The following lines are desirable for IPv6 capable hosts
@@ -23,3 +21,4 @@ ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
ff02::3 ip6-allhosts
+
diff --git a/templates/sources.list.tmpl b/templates/sources.list.tmpl
index f702025f..8acbd7d5 100644
--- a/templates/sources.list.tmpl
+++ b/templates/sources.list.tmpl
@@ -1,60 +1,59 @@
-\## Note, this file is written by cloud-init on first boot of an instance
-\## modifications made here will not survive a re-bundle.
-\## if you wish to make changes you can:
-\## a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
-\## or do the same in user-data
-\## b.) add sources in /etc/apt/sources.list.d
-\## c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
-\###
+# Note, this file is written by cloud-init on first boot of an instance
+# modifications made here will not survive a re-bundle.
+# if you wish to make changes you can:
+# a.) add 'apt_preserve_sources_list: true' to /etc/cloud/cloud.cfg
+# or do the same in user-data
+# b.) add sources in /etc/apt/sources.list.d
+# c.) make changes to template file /etc/cloud/templates/sources.list.tmpl
# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
# newer versions of the distribution.
-deb $mirror $codename main
-deb-src $mirror $codename main
+deb {{mirror}} {{codename}} main
+deb-src {{mirror}} {{codename}} main
-\## Major bug fix updates produced after the final release of the
-\## distribution.
-deb $mirror $codename-updates main
-deb-src $mirror $codename-updates main
+# Major bug fix updates produced after the final release of the
+# distribution.
+deb {{mirror}} {{codename}}-updates main
+deb-src {{mirror}} {{codename}}-updates main
-\## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
-\## team. Also, please note that software in universe WILL NOT receive any
-\## review or updates from the Ubuntu security team.
-deb $mirror $codename universe
-deb-src $mirror $codename universe
-deb $mirror $codename-updates universe
-deb-src $mirror $codename-updates universe
+# N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
+# team. Also, please note that software in universe WILL NOT receive any
+# review or updates from the Ubuntu security team.
+deb {{mirror}} {{codename}} universe
+deb-src {{mirror}} {{codename}} universe
+deb {{mirror}} {{codename}}-updates universe
+deb-src {{mirror}} {{codename}}-updates universe
-\## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
-\## team, and may not be under a free licence. Please satisfy yourself as to
-\## your rights to use the software. Also, please note that software in
-\## multiverse WILL NOT receive any review or updates from the Ubuntu
-\## security team.
-# deb $mirror $codename multiverse
-# deb-src $mirror $codename multiverse
-# deb $mirror $codename-updates multiverse
-# deb-src $mirror $codename-updates multiverse
+# N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
+# team, and may not be under a free licence. Please satisfy yourself as to
+# your rights to use the software. Also, please note that software in
+# multiverse WILL NOT receive any review or updates from the Ubuntu
+# security team.
+# deb {{mirror}} {{codename}} multiverse
+# deb-src {{mirror}} {{codename}} multiverse
+# deb {{mirror}} {{codename}}-updates multiverse
+# deb-src {{mirror}} {{codename}}-updates multiverse
-\## Uncomment the following two lines to add software from the 'backports'
-\## repository.
-\## N.B. software from this repository may not have been tested as
-\## extensively as that contained in the main release, although it includes
-\## newer versions of some applications which may provide useful features.
-\## Also, please note that software in backports WILL NOT receive any review
-\## or updates from the Ubuntu security team.
-# deb $mirror $codename-backports main restricted universe multiverse
-# deb-src $mirror $codename-backports main restricted universe multiverse
+# Uncomment the following two lines to add software from the 'backports'
+# repository.
+# N.B. software from this repository may not have been tested as
+# extensively as that contained in the main release, although it includes
+# newer versions of some applications which may provide useful features.
+# Also, please note that software in backports WILL NOT receive any review
+# or updates from the Ubuntu security team.
+# deb {{mirror}} {{codename}}-backports main restricted universe multiverse
+# deb-src {{mirror}} {{codename}}-backports main restricted universe multiverse
-\## Uncomment the following two lines to add software from Canonical's
-\## 'partner' repository.
-\## This software is not part of Ubuntu, but is offered by Canonical and the
-\## respective vendors as a service to Ubuntu users.
-# deb http://archive.canonical.com/ubuntu $codename partner
-# deb-src http://archive.canonical.com/ubuntu $codename partner
+# Uncomment the following two lines to add software from Canonical's
+# 'partner' repository.
+# This software is not part of Ubuntu, but is offered by Canonical and the
+# respective vendors as a service to Ubuntu users.
+# deb http://archive.canonical.com/ubuntu {{codename}} partner
+# deb-src http://archive.canonical.com/ubuntu {{codename}} partner
-deb http://security.ubuntu.com/ubuntu $codename-security main
-deb-src http://security.ubuntu.com/ubuntu $codename-security main
-deb http://security.ubuntu.com/ubuntu $codename-security universe
-deb-src http://security.ubuntu.com/ubuntu $codename-security universe
-# deb http://security.ubuntu.com/ubuntu $codename-security multiverse
-# deb-src http://security.ubuntu.com/ubuntu $codename-security multiverse
+deb http://security.ubuntu.com/ubuntu {{codename}}-security main
+deb-src http://security.ubuntu.com/ubuntu {{codename}}-security main
+deb http://security.ubuntu.com/ubuntu {{codename}}-security universe
+deb-src http://security.ubuntu.com/ubuntu {{codename}}-security universe
+# deb http://security.ubuntu.com/ubuntu {{codename}}-security multiverse
+# deb-src http://security.ubuntu.com/ubuntu {{codename}}-security multiverse
diff --git a/tests/configs/sample1.yaml b/tests/configs/sample1.yaml
new file mode 100644
index 00000000..24e874ee
--- /dev/null
+++ b/tests/configs/sample1.yaml
@@ -0,0 +1,53 @@
+#cloud-config
+#apt_update: false
+#apt_upgrade: true
+packages: [ bzr, pastebinit, ubuntu-dev-tools, ccache, bzr-builddeb, vim-nox, git-core, lftp ]
+
+#apt_sources:
+# - source: ppa:smoser/ppa
+
+#disable_root: False
+
+# mounts:
+# - [ ephemeral0, /mnt ]
+# - [ swap, none, swap, sw, 0, 0 ]
+
+ssh_import_id: [smoser ]
+
+#!/bin/sh
+
+output: {all: '| tee -a /var/log/cloud-init-output.log'}
+
+sm_misc:
+ - &user_setup |
+ set -x; exec > ~/user_setup.log 2>&1
+ echo "starting at $(date -R)"
+ echo "set -o vi" >> ~/.bashrc
+ cat >> ~/.profile <<"EOF"
+ export EDITOR=vi
+ export DEB_BUILD_OPTIONS=parallel=4
+ export PATH=/usr/lib/ccache:$PATH
+ EOF
+
+ mkdir ~/bin
+ chmod 755 ~/bin
+ cat > ~/bin/mdebuild <<"EOF"
+ #!/bin/sh
+ exec debuild --prepend-path /usr/lib/ccache "$@"
+ EOF
+ chmod 755 ~/bin/*
+
+ #byobu-launcher-install
+ byobu-ctrl-a screen 2>&1 || :
+
+ echo "pinging 8.8.8.8"
+ ping -c 4 8.8.8.8
+
+runcmd:
+ - [ sudo, -Hu, ubuntu, sh, -c, '[ -e /var/log/cloud-init.log ] || exit 0; grep "cloud-init.*running" /var/log/cloud-init.log > ~/runcmd.log' ]
+ - [ sudo, -Hu, ubuntu, sh, -c, 'read up sleep < /proc/uptime; echo $(date): runcmd up at $up | tee -a ~/runcmd.log' ]
+ - [ sudo, -Hu, ubuntu, sh, -c, *user_setup ]
+
+
+byobu_by_default: user
+output: {all: '| tee -a /var/log/cloud-init-output.log'}
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index 4f60f0ea..af18955d 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -1,18 +1,42 @@
-from mocker import MockerTestCase, ANY, ARGS, KWARGS
+import StringIO
+import logging
import os
+import sys
+
+from mocker import MockerTestCase, ANY, ARGS, KWARGS
+
+from cloudinit import handlers
+from cloudinit import helpers
+from cloudinit import importer
+from cloudinit import log
+from cloudinit import settings
+from cloudinit import url_helper
+from cloudinit import util
+
+
+class FakeModule(handlers.Handler):
+ def __init__(self):
+ handlers.Handler.__init__(self, settings.PER_ALWAYS)
+ self.types = []
+
+ def list_types(self):
+ return self.types
+
+ def _handle_part(self, data, ctype, filename, payload, frequency):
+ pass
-from cloudinit import (partwalker_handle_handler, handler_handle_part,
- handler_register, get_cmdline_url)
-from cloudinit.util import write_file, logexc, readurl
+class TestWalkerHandleHandler(MockerTestCase):
-class TestPartwalkerHandleHandler(MockerTestCase):
def setUp(self):
+
+ MockerTestCase.setUp(self)
+
self.data = {
"handlercount": 0,
- "frequency": "?",
- "handlerdir": "?",
- "handlers": [],
+ "frequency": "",
+ "handlerdir": self.makeDir(),
+ "handlers": helpers.ContentHandlers(),
"data": None}
self.expected_module_name = "part-handler-%03d" % (
@@ -20,179 +44,138 @@ class TestPartwalkerHandleHandler(MockerTestCase):
expected_file_name = "%s.py" % self.expected_module_name
expected_file_fullname = os.path.join(self.data["handlerdir"],
expected_file_name)
- self.module_fake = "fake module handle"
+ self.module_fake = FakeModule()
self.ctype = None
self.filename = None
self.payload = "dummy payload"
# Mock the write_file function
- write_file_mock = self.mocker.replace(write_file, passthrough=False)
+ write_file_mock = self.mocker.replace(util.write_file, passthrough=False)
write_file_mock(expected_file_fullname, self.payload, 0600)
def test_no_errors(self):
"""Payload gets written to file and added to C{pdata}."""
- # Mock the __import__ builtin
- import_mock = self.mocker.replace("__builtin__.__import__")
+ import_mock = self.mocker.replace(importer.import_module, passthrough=False)
import_mock(self.expected_module_name)
self.mocker.result(self.module_fake)
- # Mock the handle_register function
- handle_reg_mock = self.mocker.replace(handler_register,
- passthrough=False)
- handle_reg_mock(self.module_fake, self.data["handlers"],
- self.data["data"], self.data["frequency"])
- # Activate mocks
self.mocker.replay()
-
- partwalker_handle_handler(self.data, self.ctype, self.filename,
- self.payload)
-
+
+ handlers.walker_handle_handler(self.data, self.ctype, self.filename,
+ self.payload)
+
self.assertEqual(1, self.data["handlercount"])
-
+
def test_import_error(self):
"""Module import errors are logged. No handler added to C{pdata}"""
- # Mock the __import__ builtin
- import_mock = self.mocker.replace("__builtin__.__import__")
+ import_mock = self.mocker.replace(importer.import_module, passthrough=False)
import_mock(self.expected_module_name)
self.mocker.throw(ImportError())
- # Mock log function
- logexc_mock = self.mocker.replace(logexc, passthrough=False)
- logexc_mock(ANY)
- # Mock the print_exc function
- print_exc_mock = self.mocker.replace("traceback.print_exc",
- passthrough=False)
- print_exc_mock(ARGS, KWARGS)
- # Activate mocks
self.mocker.replay()
- partwalker_handle_handler(self.data, self.ctype, self.filename,
- self.payload)
+ handlers.walker_handle_handler(self.data, self.ctype, self.filename,
+ self.payload)
self.assertEqual(0, self.data["handlercount"])
def test_attribute_error(self):
"""Attribute errors are logged. No handler added to C{pdata}"""
- # Mock the __import__ builtin
- import_mock = self.mocker.replace("__builtin__.__import__")
+ import_mock = self.mocker.replace(importer.import_module, passthrough=False)
import_mock(self.expected_module_name)
self.mocker.result(self.module_fake)
- # Mock the handle_register function
- handle_reg_mock = self.mocker.replace(handler_register,
- passthrough=False)
- handle_reg_mock(self.module_fake, self.data["handlers"],
- self.data["data"], self.data["frequency"])
self.mocker.throw(AttributeError())
- # Mock log function
- logexc_mock = self.mocker.replace(logexc, passthrough=False)
- logexc_mock(ANY)
- # Mock the print_exc function
- print_exc_mock = self.mocker.replace("traceback.print_exc",
- passthrough=False)
- print_exc_mock(ARGS, KWARGS)
- # Activate mocks
self.mocker.replay()
- partwalker_handle_handler(self.data, self.ctype, self.filename,
- self.payload)
+ handlers.walker_handle_handler(self.data, self.ctype, self.filename,
+ self.payload)
self.assertEqual(0, self.data["handlercount"])
class TestHandlerHandlePart(MockerTestCase):
+
def setUp(self):
self.data = "fake data"
self.ctype = "fake ctype"
self.filename = "fake filename"
self.payload = "fake payload"
- self.frequency = "once-per-instance"
+ self.frequency = settings.PER_INSTANCE
def test_normal_version_1(self):
"""
C{handle_part} is called without C{frequency} for
C{handler_version} == 1.
"""
- # Build a mock part-handler module
mod_mock = self.mocker.mock()
getattr(mod_mock, "frequency")
- self.mocker.result("once-per-instance")
+ self.mocker.result(settings.PER_INSTANCE)
getattr(mod_mock, "handler_version")
self.mocker.result(1)
mod_mock.handle_part(self.data, self.ctype, self.filename,
self.payload)
self.mocker.replay()
- handler_handle_part(mod_mock, self.data, self.ctype, self.filename,
- self.payload, self.frequency)
+ handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
+ self.payload, self.frequency)
def test_normal_version_2(self):
"""
C{handle_part} is called with C{frequency} for
C{handler_version} == 2.
"""
- # Build a mock part-handler module
mod_mock = self.mocker.mock()
getattr(mod_mock, "frequency")
- self.mocker.result("once-per-instance")
+ self.mocker.result(settings.PER_INSTANCE)
getattr(mod_mock, "handler_version")
self.mocker.result(2)
mod_mock.handle_part(self.data, self.ctype, self.filename,
self.payload, self.frequency)
self.mocker.replay()
- handler_handle_part(mod_mock, self.data, self.ctype, self.filename,
- self.payload, self.frequency)
+ handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
+ self.payload, self.frequency)
def test_modfreq_per_always(self):
"""
C{handle_part} is called regardless of frequency if nofreq is always.
"""
self.frequency = "once"
- # Build a mock part-handler module
mod_mock = self.mocker.mock()
getattr(mod_mock, "frequency")
- self.mocker.result("always")
+ self.mocker.result(settings.PER_ALWAYS)
getattr(mod_mock, "handler_version")
self.mocker.result(1)
mod_mock.handle_part(self.data, self.ctype, self.filename,
self.payload)
self.mocker.replay()
- handler_handle_part(mod_mock, self.data, self.ctype, self.filename,
- self.payload, self.frequency)
+ handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
+ self.payload, self.frequency)
def test_no_handle_when_modfreq_once(self):
"""C{handle_part} is not called if frequency is once"""
self.frequency = "once"
- # Build a mock part-handler module
mod_mock = self.mocker.mock()
getattr(mod_mock, "frequency")
- self.mocker.result("once-per-instance")
+ self.mocker.result(settings.PER_ONCE)
self.mocker.replay()
- handler_handle_part(mod_mock, self.data, self.ctype, self.filename,
- self.payload, self.frequency)
+ handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
+ self.payload, self.frequency)
def test_exception_is_caught(self):
"""Exceptions within C{handle_part} are caught and logged."""
- # Build a mock part-handler module
mod_mock = self.mocker.mock()
getattr(mod_mock, "frequency")
- self.mocker.result("once-per-instance")
+ self.mocker.result(settings.PER_INSTANCE)
getattr(mod_mock, "handler_version")
self.mocker.result(1)
mod_mock.handle_part(self.data, self.ctype, self.filename,
self.payload)
self.mocker.throw(Exception())
- # Mock log function
- logexc_mock = self.mocker.replace(logexc, passthrough=False)
- logexc_mock(ANY)
- # Mock the print_exc function
- print_exc_mock = self.mocker.replace("traceback.print_exc",
- passthrough=False)
- print_exc_mock(ARGS, KWARGS)
self.mocker.replay()
- handler_handle_part(mod_mock, self.data, self.ctype, self.filename,
- self.payload, self.frequency)
+ handlers.run_part(mod_mock, self.data, self.ctype, self.filename,
+ self.payload, self.frequency)
class TestCmdlineUrl(MockerTestCase):
@@ -202,14 +185,13 @@ class TestCmdlineUrl(MockerTestCase):
payload = "0"
cmdline = "ro %s=%s bar=1" % (key, url)
- mock_readurl = self.mocker.replace(readurl, passthrough=False)
+ mock_readurl = self.mocker.replace(url_helper.readurl, passthrough=False)
mock_readurl(url)
- self.mocker.result(payload)
-
+ self.mocker.result(url_helper.UrlResponse(200, payload))
self.mocker.replay()
self.assertEqual((key, url, None),
- get_cmdline_url(names=[key], starts="xxxxxx", cmdline=cmdline))
+ util.get_cmdline_url(names=[key], starts="xxxxxx", cmdline=cmdline))
def test_valid_content(self):
url = "http://example.com/foo"
@@ -217,14 +199,13 @@ class TestCmdlineUrl(MockerTestCase):
payload = "xcloud-config\nmydata: foo\nbar: wark\n"
cmdline = "ro %s=%s bar=1" % (key, url)
- mock_readurl = self.mocker.replace(readurl, passthrough=False)
+ mock_readurl = self.mocker.replace(url_helper.readurl, passthrough=False)
mock_readurl(url)
- self.mocker.result(payload)
-
+ self.mocker.result(url_helper.UrlResponse(200, payload))
self.mocker.replay()
self.assertEqual((key, url, payload),
- get_cmdline_url(names=[key], starts="xcloud-config",
+ util.get_cmdline_url(names=[key], starts="xcloud-config",
cmdline=cmdline))
def test_no_key_found(self):
@@ -232,11 +213,12 @@ class TestCmdlineUrl(MockerTestCase):
key = "mykey"
cmdline = "ro %s=%s bar=1" % (key, url)
- self.mocker.replace(readurl, passthrough=False)
+ self.mocker.replace(url_helper.readurl, passthrough=False)
+ self.mocker.result(url_helper.UrlResponse(400))
self.mocker.replay()
self.assertEqual((None, None, None),
- get_cmdline_url(names=["does-not-appear"],
+ util.get_cmdline_url(names=["does-not-appear"],
starts="#cloud-config", cmdline=cmdline))
# vi: ts=4 expandtab
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
new file mode 100644
index 00000000..84d85d4d
--- /dev/null
+++ b/tests/unittests/test_builtin_handlers.py
@@ -0,0 +1,54 @@
+"""Tests of the built-in user data handlers"""
+
+import os
+
+from mocker import MockerTestCase
+
+from cloudinit import handlers
+from cloudinit import helpers
+from cloudinit import util
+
+from cloudinit.handlers import upstart_job
+
+from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE)
+
+
+class TestBuiltins(MockerTestCase):
+
+ def test_upstart_frequency_no_out(self):
+ c_root = self.makeDir()
+ up_root = self.makeDir()
+ paths = helpers.Paths({
+ 'cloud_dir': c_root,
+ 'upstart_dir': up_root,
+ })
+ freq = PER_ALWAYS
+ h = upstart_job.UpstartJobPartHandler(paths)
+ # No files should be written out when
+ # the frequency is ! per-instance
+ h.handle_part('', handlers.CONTENT_START,
+ None, None, None)
+ h.handle_part('blah', 'text/upstart-job',
+ 'test.conf', 'blah', freq)
+ h.handle_part('', handlers.CONTENT_END,
+ None, None, None)
+ self.assertEquals(0, len(os.listdir(up_root)))
+
+ def test_upstart_frequency_single(self):
+ c_root = self.makeDir()
+ up_root = self.makeDir()
+ paths = helpers.Paths({
+ 'cloud_dir': c_root,
+ 'upstart_dir': up_root,
+ })
+ freq = PER_INSTANCE
+ h = upstart_job.UpstartJobPartHandler(paths)
+ # No files should be written out when
+ # the frequency is ! per-instance
+ h.handle_part('', handlers.CONTENT_START,
+ None, None, None)
+ h.handle_part('blah', 'text/upstart-job',
+ 'test.conf', 'blah', freq)
+ h.handle_part('', handlers.CONTENT_END,
+ None, None, None)
+ self.assertEquals(1, len(os.listdir(up_root)))
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index 7659dd03..261c410a 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -1,14 +1,11 @@
-from tempfile import mkdtemp
-from shutil import rmtree
import os
from StringIO import StringIO
from copy import copy
-from cloudinit.DataSourceMAAS import (
- MAASSeedDirNone,
- MAASSeedDirMalformed,
- read_maas_seed_dir,
- read_maas_seed_url,
-)
+
+from cloudinit import util
+from cloudinit import url_helper
+from cloudinit.sources import DataSourceMAAS
+
from mocker import MockerTestCase
@@ -17,12 +14,7 @@ class TestMAASDataSource(MockerTestCase):
def setUp(self):
super(TestMAASDataSource, self).setUp()
# Make a temp directoy for tests to use.
- self.tmp = mkdtemp(prefix="unittest_")
-
- def tearDown(self):
- super(TestMAASDataSource, self).tearDown()
- # Clean up temp directory
- rmtree(self.tmp)
+ self.tmp = self.makeDir()
def test_seed_dir_valid(self):
"""Verify a valid seeddir is read as such"""
@@ -35,7 +27,7 @@ class TestMAASDataSource(MockerTestCase):
my_d = os.path.join(self.tmp, "valid")
populate_dir(my_d, data)
- (userdata, metadata) = read_maas_seed_dir(my_d)
+ (userdata, metadata) = DataSourceMAAS.read_maas_seed_dir(my_d)
self.assertEqual(userdata, data['user-data'])
for key in ('instance-id', 'local-hostname'):
@@ -54,7 +46,7 @@ class TestMAASDataSource(MockerTestCase):
my_d = os.path.join(self.tmp, "valid_extra")
populate_dir(my_d, data)
- (userdata, metadata) = read_maas_seed_dir(my_d)
+ (userdata, metadata) = DataSourceMAAS.read_maas_seed_dir(my_d)
self.assertEqual(userdata, data['user-data'])
for key in ('instance-id', 'local-hostname'):
@@ -76,24 +68,28 @@ class TestMAASDataSource(MockerTestCase):
invalid_data = copy(valid)
del invalid_data['local-hostname']
populate_dir(my_d, invalid_data)
- self.assertRaises(MAASSeedDirMalformed, read_maas_seed_dir, my_d)
+ self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
+ DataSourceMAAS.read_maas_seed_dir, my_d)
# missing 'instance-id'
my_d = "%s-02" % my_based
invalid_data = copy(valid)
del invalid_data['instance-id']
populate_dir(my_d, invalid_data)
- self.assertRaises(MAASSeedDirMalformed, read_maas_seed_dir, my_d)
+ self.assertRaises(DataSourceMAAS.MAASSeedDirMalformed,
+ DataSourceMAAS.read_maas_seed_dir, my_d)
def test_seed_dir_none(self):
"""Verify that empty seed_dir raises MAASSeedDirNone"""
my_d = os.path.join(self.tmp, "valid_empty")
- self.assertRaises(MAASSeedDirNone, read_maas_seed_dir, my_d)
+ self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
+ DataSourceMAAS.read_maas_seed_dir, my_d)
def test_seed_dir_missing(self):
"""Verify that missing seed_dir raises MAASSeedDirNone"""
- self.assertRaises(MAASSeedDirNone, read_maas_seed_dir,
+ self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
+ DataSourceMAAS.read_maas_seed_dir,
os.path.join(self.tmp, "nonexistantdirectory"))
def test_seed_url_valid(self):
@@ -102,30 +98,30 @@ class TestMAASDataSource(MockerTestCase):
'meta-data/local-hostname': 'test-hostname',
'meta-data/public-keys': 'test-hostname',
'user-data': 'foodata'}
-
+ valid_order = [
+ 'meta-data/local-hostname',
+ 'meta-data/instance-id',
+ 'meta-data/public-keys',
+ 'user-data',
+ ]
my_seed = "http://example.com/xmeta"
my_ver = "1999-99-99"
my_headers = {'header1': 'value1', 'header2': 'value2'}
def my_headers_cb(url):
- return(my_headers)
+ return my_headers
- mock_request = self.mocker.replace("urllib2.Request",
- passthrough=False)
- mock_urlopen = self.mocker.replace("urllib2.urlopen",
+ mock_request = self.mocker.replace(url_helper.readurl,
passthrough=False)
- for (key, val) in valid.iteritems():
- mock_request("%s/%s/%s" % (my_seed, my_ver, key),
- data=None, headers=my_headers)
- self.mocker.nospec()
- self.mocker.result("fake-request-%s" % key)
- mock_urlopen("fake-request-%s" % key, timeout=None)
- self.mocker.result(StringIO(val))
-
+ for key in valid_order:
+ url = "%s/%s/%s" % (my_seed, my_ver, key)
+ mock_request(url, headers=my_headers, timeout=None)
+ resp = valid.get(key)
+ self.mocker.result(url_helper.UrlResponse(200, resp))
self.mocker.replay()
- (userdata, metadata) = read_maas_seed_url(my_seed,
+ (userdata, metadata) = DataSourceMAAS.read_maas_seed_url(my_seed,
header_cb=my_headers_cb, version=my_ver)
self.assertEqual("foodata", userdata)
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index 21d2442f..1f96e992 100644
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
@@ -1,9 +1,12 @@
from mocker import MockerTestCase
-from cloudinit.util import write_file, delete_dir_contents
-from cloudinit.CloudConfig.cc_ca_certs import (
- handle, update_ca_certs, add_ca_certs, remove_default_ca_certs)
-from logging import getLogger
+from cloudinit import util
+from cloudinit import cloud
+from cloudinit import helpers
+
+from cloudinit.config import cc_ca_certs
+
+import logging
class TestNoConfig(MockerTestCase):
@@ -11,36 +14,37 @@ class TestNoConfig(MockerTestCase):
super(TestNoConfig, self).setUp()
self.name = "ca-certs"
self.cloud_init = None
- self.log = getLogger("TestNoConfig")
+ self.log = logging.getLogger("TestNoConfig")
self.args = []
def test_no_config(self):
"""
Test that nothing is done if no ca-certs configuration is provided.
"""
- config = {"unknown-key": "value"}
-
- self.mocker.replace(write_file, passthrough=False)
- self.mocker.replace(update_ca_certs, passthrough=False)
+ config = util.get_builtin_cfg()
+ self.mocker.replace(util.write_file, passthrough=False)
+ self.mocker.replace(cc_ca_certs.update_ca_certs, passthrough=False)
self.mocker.replay()
- handle(self.name, config, self.cloud_init, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, self.cloud_init, self.log, self.args)
class TestConfig(MockerTestCase):
def setUp(self):
super(TestConfig, self).setUp()
self.name = "ca-certs"
- self.cloud_init = None
- self.log = getLogger("TestNoConfig")
+ self.paths = None
+ self.cloud = cloud.Cloud(None, self.paths, None, None, None)
+ self.log = logging.getLogger("TestNoConfig")
self.args = []
# Mock out the functions that actually modify the system
- self.mock_add = self.mocker.replace(add_ca_certs, passthrough=False)
- self.mock_update = self.mocker.replace(update_ca_certs,
+ self.mock_add = self.mocker.replace(cc_ca_certs.add_ca_certs, passthrough=False)
+ self.mock_update = self.mocker.replace(cc_ca_certs.update_ca_certs,
passthrough=False)
- self.mock_remove = self.mocker.replace(remove_default_ca_certs,
+ self.mock_remove = self.mocker.replace(cc_ca_certs.remove_default_ca_certs,
passthrough=False)
+
# Order must be correct
self.mocker.order()
@@ -55,7 +59,7 @@ class TestConfig(MockerTestCase):
self.mock_update()
self.mocker.replay()
- handle(self.name, config, self.cloud_init, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_empty_trusted_list(self):
"""Test that no certificate are written if 'trusted' list is empty"""
@@ -65,37 +69,37 @@ class TestConfig(MockerTestCase):
self.mock_update()
self.mocker.replay()
- handle(self.name, config, self.cloud_init, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_single_trusted(self):
"""Test that a single cert gets passed to add_ca_certs"""
config = {"ca-certs": {"trusted": ["CERT1"]}}
- self.mock_add(["CERT1"])
+ self.mock_add(self.paths, ["CERT1"])
self.mock_update()
self.mocker.replay()
- handle(self.name, config, self.cloud_init, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_multiple_trusted(self):
"""Test that multiple certs get passed to add_ca_certs"""
config = {"ca-certs": {"trusted": ["CERT1", "CERT2"]}}
- self.mock_add(["CERT1", "CERT2"])
+ self.mock_add(self.paths, ["CERT1", "CERT2"])
self.mock_update()
self.mocker.replay()
- handle(self.name, config, self.cloud_init, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_remove_default_ca_certs(self):
"""Test remove_defaults works as expected"""
config = {"ca-certs": {"remove-defaults": True}}
- self.mock_remove()
+ self.mock_remove(self.paths)
self.mock_update()
self.mocker.replay()
- handle(self.name, config, self.cloud_init, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_no_remove_defaults_if_false(self):
"""Test remove_defaults is not called when config value is False"""
@@ -104,72 +108,85 @@ class TestConfig(MockerTestCase):
self.mock_update()
self.mocker.replay()
- handle(self.name, config, self.cloud_init, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
def test_correct_order_for_remove_then_add(self):
"""Test remove_defaults is not called when config value is False"""
config = {"ca-certs": {"remove-defaults": True, "trusted": ["CERT1"]}}
- self.mock_remove()
- self.mock_add(["CERT1"])
+ self.mock_remove(self.paths)
+ self.mock_add(self.paths, ["CERT1"])
self.mock_update()
self.mocker.replay()
- handle(self.name, config, self.cloud_init, self.log, self.args)
+ cc_ca_certs.handle(self.name, config, self.cloud, self.log, self.args)
class TestAddCaCerts(MockerTestCase):
+
+ def setUp(self):
+ super(TestAddCaCerts, self).setUp()
+ self.paths = helpers.Paths({
+ 'cloud_dir': self.makeDir()
+ })
+
def test_no_certs_in_list(self):
"""Test that no certificate are written if not provided."""
- self.mocker.replace(write_file, passthrough=False)
+ self.mocker.replace(util.write_file, passthrough=False)
self.mocker.replay()
-
- add_ca_certs([])
+ cc_ca_certs.add_ca_certs(self.paths, [])
def test_single_cert(self):
"""Test adding a single certificate to the trusted CAs"""
cert = "CERT1\nLINE2\nLINE3"
- mock_write = self.mocker.replace(write_file, passthrough=False)
+ mock_write = self.mocker.replace(util.write_file, passthrough=False)
mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
cert, mode=0644)
mock_write("/etc/ca-certificates.conf",
- "\ncloud-init-ca-certs.crt", omode="a")
+ "\ncloud-init-ca-certs.crt", omode="ab")
self.mocker.replay()
- add_ca_certs([cert])
+ cc_ca_certs.add_ca_certs(self.paths, [cert])
def test_multiple_certs(self):
"""Test adding multiple certificates to the trusted CAs"""
certs = ["CERT1\nLINE2\nLINE3", "CERT2\nLINE2\nLINE3"]
expected_cert_file = "\n".join(certs)
- mock_write = self.mocker.replace(write_file, passthrough=False)
+ mock_write = self.mocker.replace(util.write_file, passthrough=False)
mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
expected_cert_file, mode=0644)
mock_write("/etc/ca-certificates.conf",
- "\ncloud-init-ca-certs.crt", omode="a")
+ "\ncloud-init-ca-certs.crt", omode="ab")
self.mocker.replay()
- add_ca_certs(certs)
+ cc_ca_certs.add_ca_certs(self.paths, certs)
class TestUpdateCaCerts(MockerTestCase):
def test_commands(self):
- mock_check_call = self.mocker.replace("subprocess.check_call",
+ mock_check_call = self.mocker.replace(util.subp,
passthrough=False)
- mock_check_call(["update-ca-certificates"])
+ mock_check_call(["update-ca-certificates"], capture=False)
self.mocker.replay()
- update_ca_certs()
+ cc_ca_certs.update_ca_certs()
class TestRemoveDefaultCaCerts(MockerTestCase):
+
+ def setUp(self):
+ super(TestRemoveDefaultCaCerts, self).setUp()
+ self.paths = helpers.Paths({
+ 'cloud_dir': self.makeDir()
+ })
+
def test_commands(self):
- mock_delete_dir_contents = self.mocker.replace(delete_dir_contents,
+ mock_delete_dir_contents = self.mocker.replace(util.delete_dir_contents,
passthrough=False)
- mock_write = self.mocker.replace(write_file, passthrough=False)
- mock_subp = self.mocker.replace("cloudinit.util.subp",
+ mock_write = self.mocker.replace(util.write_file, passthrough=False)
+ mock_subp = self.mocker.replace(util.subp,
passthrough=False)
mock_delete_dir_contents("/usr/share/ca-certificates/")
@@ -179,4 +196,4 @@ class TestRemoveDefaultCaCerts(MockerTestCase):
"ca-certificates ca-certificates/trust_new_crts select no")
self.mocker.replay()
- remove_default_ca_certs()
+ cc_ca_certs.remove_default_ca_certs(self.paths)
diff --git a/tests/unittests/test_userdata.py b/tests/unittests/test_userdata.py
index 8eb7b259..861642b6 100644
--- a/tests/unittests/test_userdata.py
+++ b/tests/unittests/test_userdata.py
@@ -1,107 +1,144 @@
"""Tests for handling of userdata within cloud init"""
-import logging
import StringIO
+import logging
+import os
+import shutil
+import tempfile
+
from email.mime.base import MIMEBase
from mocker import MockerTestCase
-import cloudinit
-from cloudinit.DataSource import DataSource
-
+from cloudinit import helpers
+from cloudinit import log
+from cloudinit import sources
+from cloudinit import stages
+from cloudinit import util
-instance_id = "i-testing"
+INSTANCE_ID = "i-testing"
-class FakeDataSource(DataSource):
+class FakeDataSource(sources.DataSource):
def __init__(self, userdata):
- DataSource.__init__(self)
- self.metadata = {'instance-id': instance_id}
+ sources.DataSource.__init__(self, {}, None, None)
+ self.metadata = {'instance-id': INSTANCE_ID}
self.userdata_raw = userdata
-class TestConsumeUserData(MockerTestCase):
+# FIXME: these tests shouldn't be checking log output??
+# Weirddddd...
+
- _log_handler = None
- _log = None
- log_file = None
+class TestConsumeUserData(MockerTestCase):
def setUp(self):
+ MockerTestCase.setUp(self)
+ # Replace the write so no actual files
+ # get written out...
self.mock_write = self.mocker.replace("cloudinit.util.write_file",
passthrough=False)
- self.mock_write(self.get_ipath("cloud_config"), "", 0600)
- self.capture_log()
+ self._log = None
+ self._log_file = None
+ self._log_handler = None
def tearDown(self):
- self._log.removeHandler(self._log_handler)
-
- @staticmethod
- def get_ipath(name):
- return "%s/instances/%s%s" % (cloudinit.varlibdir, instance_id,
- cloudinit.pathmap[name])
-
- def capture_log(self):
- self.log_file = StringIO.StringIO()
- self._log_handler = logging.StreamHandler(self.log_file)
- self._log_handler.setLevel(logging.DEBUG)
- self._log = logging.getLogger(cloudinit.logger_name)
+ MockerTestCase.tearDown(self)
+ if self._log_handler and self._log:
+ self._log.removeHandler(self._log_handler)
+
+ def capture_log(self, lvl=logging.DEBUG):
+ log_file = StringIO.StringIO()
+ self._log_handler = logging.StreamHandler(log_file)
+ self._log_handler.setLevel(lvl)
+ self._log = log.getLogger()
self._log.addHandler(self._log_handler)
+ return log_file
def test_unhandled_type_warning(self):
"""Raw text without magic is ignored but shows warning"""
+ ci = stages.Init()
+ data = "arbitrary text\n"
+ ci.datasource = FakeDataSource(data)
+
+ self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
self.mocker.replay()
- ci = cloudinit.CloudInit()
- ci.datasource = FakeDataSource("arbitrary text\n")
+
+ log_file = self.capture_log(logging.WARNING)
+ ci.fetch()
ci.consume_userdata()
- self.assertEqual(
- "Unhandled non-multipart userdata starting 'arbitrary text...'\n",
- self.log_file.getvalue())
+ self.assertIn(
+ "Unhandled non-multipart (text/x-not-multipart) userdata:",
+ log_file.getvalue())
def test_mime_text_plain(self):
- """Mime message of type text/plain is ignored without warning"""
- self.mocker.replay()
- ci = cloudinit.CloudInit()
+ """Mime message of type text/plain is ignored but shows warning"""
+ ci = stages.Init()
message = MIMEBase("text", "plain")
message.set_payload("Just text")
ci.datasource = FakeDataSource(message.as_string())
+
+ self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
+ self.mocker.replay()
+
+ log_file = self.capture_log(logging.WARNING)
+ ci.fetch()
ci.consume_userdata()
- self.assertEqual("", self.log_file.getvalue())
+ self.assertIn(
+ "Unhandled unknown content-type (text/plain)",
+ log_file.getvalue())
+
def test_shellscript(self):
"""Raw text starting #!/bin/sh is treated as script"""
+ ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
- outpath = cloudinit.get_ipath_cur("scripts") + "/part-001"
+ ci.datasource = FakeDataSource(script)
+
+ outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
+ self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
self.mock_write(outpath, script, 0700)
self.mocker.replay()
- ci = cloudinit.CloudInit()
- ci.datasource = FakeDataSource(script)
+
+ log_file = self.capture_log(logging.WARNING)
+ ci.fetch()
ci.consume_userdata()
- self.assertEqual("", self.log_file.getvalue())
+ self.assertEqual("", log_file.getvalue())
def test_mime_text_x_shellscript(self):
"""Mime message of type text/x-shellscript is treated as script"""
+ ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
- outpath = cloudinit.get_ipath_cur("scripts") + "/part-001"
- self.mock_write(outpath, script, 0700)
- self.mocker.replay()
- ci = cloudinit.CloudInit()
message = MIMEBase("text", "x-shellscript")
message.set_payload(script)
ci.datasource = FakeDataSource(message.as_string())
+
+ outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
+ self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
+ self.mock_write(outpath, script, 0700)
+ self.mocker.replay()
+
+ log_file = self.capture_log(logging.WARNING)
+ ci.fetch()
ci.consume_userdata()
- self.assertEqual("", self.log_file.getvalue())
+ self.assertEqual("", log_file.getvalue())
def test_mime_text_plain_shell(self):
"""Mime type text/plain starting #!/bin/sh is treated as script"""
+ ci = stages.Init()
script = "#!/bin/sh\necho hello\n"
- outpath = cloudinit.get_ipath_cur("scripts") + "/part-001"
- self.mock_write(outpath, script, 0700)
- self.mocker.replay()
- ci = cloudinit.CloudInit()
message = MIMEBase("text", "plain")
message.set_payload(script)
ci.datasource = FakeDataSource(message.as_string())
+
+ outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
+ self.mock_write(outpath, script, 0700)
+ self.mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
+ self.mocker.replay()
+
+ log_file = self.capture_log(logging.WARNING)
+ ci.fetch()
ci.consume_userdata()
- self.assertEqual("", self.log_file.getvalue())
+ self.assertEqual("", log_file.getvalue())
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index e8f5885c..93979f06 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -1,28 +1,45 @@
-from unittest import TestCase
-from mocker import MockerTestCase
-from tempfile import mkdtemp
-from shutil import rmtree
import os
import stat
-from cloudinit.util import (mergedict, get_cfg_option_list_or_str, write_file,
- delete_dir_contents, get_cmdline,
- keyval_str_to_dict)
+from unittest import TestCase
+from mocker import MockerTestCase
+
+from cloudinit import util
+from cloudinit import importer
+
+
+class FakeSelinux(object):
+
+ def __init__(self, match_what):
+ self.match_what = match_what
+ self.restored = []
+
+ def matchpathcon(self, path, mode):
+ if path == self.match_what:
+ return
+ else:
+ raise OSError("No match!")
+ def is_selinux_enabled(self):
+ return True
-class TestMergeDict(TestCase):
+ def restorecon(self, path, recursive):
+ self.restored.append(path)
+
+
+class TestMergeDict(MockerTestCase):
def test_simple_merge(self):
"""Test simple non-conflict merge."""
source = {"key1": "value1"}
candidate = {"key2": "value2"}
- result = mergedict(source, candidate)
+ result = util.mergedict(source, candidate)
self.assertEqual({"key1": "value1", "key2": "value2"}, result)
def test_nested_merge(self):
"""Test nested merge."""
source = {"key1": {"key1.1": "value1.1"}}
candidate = {"key1": {"key1.2": "value1.2"}}
- result = mergedict(source, candidate)
+ result = util.mergedict(source, candidate)
self.assertEqual(
{"key1": {"key1.1": "value1.1", "key1.2": "value1.2"}}, result)
@@ -30,42 +47,42 @@ class TestMergeDict(TestCase):
"""Test that candidate doesn't override source."""
source = {"key1": "value1", "key2": "value2"}
candidate = {"key1": "value2", "key2": "NEW VALUE"}
- result = mergedict(source, candidate)
+ result = util.mergedict(source, candidate)
self.assertEqual(source, result)
def test_empty_candidate(self):
"""Test empty candidate doesn't change source."""
source = {"key": "value"}
candidate = {}
- result = mergedict(source, candidate)
+ result = util.mergedict(source, candidate)
self.assertEqual(source, result)
def test_empty_source(self):
"""Test empty source is replaced by candidate."""
source = {}
candidate = {"key": "value"}
- result = mergedict(source, candidate)
+ result = util.mergedict(source, candidate)
self.assertEqual(candidate, result)
def test_non_dict_candidate(self):
"""Test non-dict candidate is discarded."""
source = {"key": "value"}
candidate = "not a dict"
- result = mergedict(source, candidate)
+ result = util.mergedict(source, candidate)
self.assertEqual(source, result)
def test_non_dict_source(self):
"""Test non-dict source is not modified with a dict candidate."""
source = "not a dict"
candidate = {"key": "value"}
- result = mergedict(source, candidate)
+ result = util.mergedict(source, candidate)
self.assertEqual(source, result)
def test_neither_dict(self):
"""Test if neither candidate or source is dict source wins."""
source = "source"
candidate = "candidate"
- result = mergedict(source, candidate)
+ result = util.mergedict(source, candidate)
self.assertEqual(source, result)
@@ -73,51 +90,45 @@ class TestGetCfgOptionListOrStr(TestCase):
def test_not_found_no_default(self):
"""None is returned if key is not found and no default given."""
config = {}
- result = get_cfg_option_list_or_str(config, "key")
- self.assertIsNone(result)
+ result = util.get_cfg_option_list(config, "key")
+ self.assertEqual(None, result)
def test_not_found_with_default(self):
"""Default is returned if key is not found."""
config = {}
- result = get_cfg_option_list_or_str(config, "key", default=["DEFAULT"])
+ result = util.get_cfg_option_list(config, "key", default=["DEFAULT"])
self.assertEqual(["DEFAULT"], result)
def test_found_with_default(self):
"""Default is not returned if key is found."""
config = {"key": ["value1"]}
- result = get_cfg_option_list_or_str(config, "key", default=["DEFAULT"])
+ result = util.get_cfg_option_list(config, "key", default=["DEFAULT"])
self.assertEqual(["value1"], result)
def test_found_convert_to_list(self):
"""Single string is converted to one element list."""
config = {"key": "value1"}
- result = get_cfg_option_list_or_str(config, "key")
+ result = util.get_cfg_option_list(config, "key")
self.assertEqual(["value1"], result)
def test_value_is_none(self):
"""If value is None empty list is returned."""
config = {"key": None}
- result = get_cfg_option_list_or_str(config, "key")
+ result = util.get_cfg_option_list(config, "key")
self.assertEqual([], result)
class TestWriteFile(MockerTestCase):
def setUp(self):
super(TestWriteFile, self).setUp()
- # Make a temp directoy for tests to use.
- self.tmp = mkdtemp(prefix="unittest_")
-
- def tearDown(self):
- super(TestWriteFile, self).tearDown()
- # Clean up temp directory
- rmtree(self.tmp)
+ self.tmp = self.makeDir(prefix="unittest_")
def test_basic_usage(self):
"""Verify basic usage with default args."""
path = os.path.join(self.tmp, "NewFile.txt")
contents = "Hey there"
- write_file(path, contents)
+ util.write_file(path, contents)
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isfile(path))
@@ -133,7 +144,7 @@ class TestWriteFile(MockerTestCase):
path = os.path.join(dirname, "NewFile.txt")
contents = "Hey there"
- write_file(path, contents)
+ util.write_file(path, contents)
self.assertTrue(os.path.isdir(dirname))
self.assertTrue(os.path.isfile(path))
@@ -143,7 +154,7 @@ class TestWriteFile(MockerTestCase):
path = os.path.join(self.tmp, "NewFile.txt")
contents = "Hey there"
- write_file(path, contents, mode=0666)
+ util.write_file(path, contents, mode=0666)
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isfile(path))
@@ -158,7 +169,7 @@ class TestWriteFile(MockerTestCase):
# Create file first with basic content
with open(path, "wb") as f:
f.write("LINE1\n")
- write_file(path, contents, omode="a")
+ util.write_file(path, contents, omode="a")
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isfile(path))
@@ -167,36 +178,30 @@ class TestWriteFile(MockerTestCase):
self.assertEqual("LINE1\nHey there", create_contents)
def test_restorecon_if_possible_is_called(self):
- """Make sure the restorecon_if_possible is called correctly."""
- path = os.path.join(self.tmp, "NewFile.txt")
- contents = "Hey there"
-
- # Mock out the restorecon_if_possible call to test if it's called.
- mock_restorecon = self.mocker.replace(
- "cloudinit.util.restorecon_if_possible", passthrough=False)
- mock_restorecon(path)
+ """Make sure the selinux guard is called correctly."""
+ import_mock = self.mocker.replace(importer.import_module,
+ passthrough=False)
+ import_mock('selinux')
+ fake_se = FakeSelinux('/etc/hosts')
+ self.mocker.result(fake_se)
self.mocker.replay()
+ with util.SeLinuxGuard("/etc/hosts") as is_on:
+ self.assertTrue(is_on)
+ self.assertEqual(1, len(fake_se.restored))
+ self.assertEqual('/etc/hosts', fake_se.restored[0])
- write_file(path, contents)
-
-class TestDeleteDirContents(TestCase):
+class TestDeleteDirContents(MockerTestCase):
def setUp(self):
super(TestDeleteDirContents, self).setUp()
- # Make a temp directoy for tests to use.
- self.tmp = mkdtemp(prefix="unittest_")
-
- def tearDown(self):
- super(TestDeleteDirContents, self).tearDown()
- # Clean up temp directory
- rmtree(self.tmp)
+ self.tmp = self.makeDir(prefix="unittest_")
def assertDirEmpty(self, dirname):
self.assertEqual([], os.listdir(dirname))
def test_does_not_delete_dir(self):
"""Ensure directory itself is not deleted."""
- delete_dir_contents(self.tmp)
+ util.delete_dir_contents(self.tmp)
self.assertTrue(os.path.isdir(self.tmp))
self.assertDirEmpty(self.tmp)
@@ -206,7 +211,7 @@ class TestDeleteDirContents(TestCase):
with open(os.path.join(self.tmp, "new_file.txt"), "wb") as f:
f.write("DELETE ME")
- delete_dir_contents(self.tmp)
+ util.delete_dir_contents(self.tmp)
self.assertDirEmpty(self.tmp)
@@ -214,7 +219,7 @@ class TestDeleteDirContents(TestCase):
"""Empty directories should be deleted."""
os.mkdir(os.path.join(self.tmp, "new_dir"))
- delete_dir_contents(self.tmp)
+ util.delete_dir_contents(self.tmp)
self.assertDirEmpty(self.tmp)
@@ -223,7 +228,7 @@ class TestDeleteDirContents(TestCase):
os.mkdir(os.path.join(self.tmp, "new_dir"))
os.mkdir(os.path.join(self.tmp, "new_dir", "new_subdir"))
- delete_dir_contents(self.tmp)
+ util.delete_dir_contents(self.tmp)
self.assertDirEmpty(self.tmp)
@@ -234,7 +239,7 @@ class TestDeleteDirContents(TestCase):
with open(f_name, "wb") as f:
f.write("DELETE ME")
- delete_dir_contents(self.tmp)
+ util.delete_dir_contents(self.tmp)
self.assertDirEmpty(self.tmp)
@@ -246,7 +251,7 @@ class TestDeleteDirContents(TestCase):
f.write("DELETE ME")
os.symlink(file_name, link_name)
- delete_dir_contents(self.tmp)
+ util.delete_dir_contents(self.tmp)
self.assertDirEmpty(self.tmp)
@@ -255,12 +260,12 @@ class TestKeyValStrings(TestCase):
def test_keyval_str_to_dict(self):
expected = {'1': 'one', '2': 'one+one', 'ro': True}
cmdline = "1=one ro 2=one+one"
- self.assertEqual(expected, keyval_str_to_dict(cmdline))
+ self.assertEqual(expected, util.keyval_str_to_dict(cmdline))
class TestGetCmdline(TestCase):
def test_cmdline_reads_debug_env(self):
os.environ['DEBUG_PROC_CMDLINE'] = 'abcd 123'
- self.assertEqual(os.environ['DEBUG_PROC_CMDLINE'], get_cmdline())
+ self.assertEqual(os.environ['DEBUG_PROC_CMDLINE'], util.get_cmdline())
# vi: ts=4 expandtab
diff --git a/tools/bddeb b/tools/bddeb
deleted file mode 100755
index 598f71bb..00000000
--- a/tools/bddeb
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/sh
-
-TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXXX")
-#TEMP_D=/tmp/my.d
-start=${PWD}
-rm -Rf "${TEMP_D}"; mkdir "${TEMP_D}"
-set -e
-trap "rm -Rf '${TEMP_D}'" exit
-files=$(bzr ls --versioned)
-revno=$(bzr revno)
-version=$(awk \
- -F= '$1 ~ /version$/ { gsub("[^0-9.]","",$2); print $2; }' setup.py)
-mkdir "${TEMP_D}/cloud-init"
-otar="$TEMP_D/cloud-init_$version~bzr${revno}.orig.tar.gz"
-tar -czf - ${files} > "$otar"
-tar -C "${TEMP_D}/cloud-init" -xzf - <"$otar"
-
-if [ ! -d "${TEMP_D}/cloud-init/debian" ]; then
- rsync -a debian.trunk/ "${TEMP_D}/cloud-init/debian"
-fi
-sed -i -e "s,VERSION,$version," -e "s,REVNO,bzr$revno," \
- "$TEMP_D/cloud-init/debian/changelog"
-cd "${TEMP_D}/cloud-init"
-debuild "$@"
-#for x in ../*.deb; do
-# echo wrote ${x##*/}
-#done
-debname="cloud-init_${version}~bzr${revno}-0_all.deb"
-mv "../$debname" "$start"
-link="$start/cloud-init_all.deb"
-echo "wrote $debname"
-[ ! -e "$link" -o -L "$link" ]
- { ln -sf "$debname" "$link" && echo "linked ${link##*/}"; }
diff --git a/tools/hacking.py b/tools/hacking.py
new file mode 100755
index 00000000..d0c27d25
--- /dev/null
+++ b/tools/hacking.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012, Cloudscaling
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""cloudinit HACKING file compliance testing (based off of nova hacking.py)
+
+built on top of pep8.py
+"""
+
+import inspect
+import logging
+import os
+import re
+import sys
+import tokenize
+import warnings
+
+import pep8
+
+# Don't need this for testing
+logging.disable('LOG')
+
+# N1xx comments
+# N2xx except
+# N3xx imports
+# N4xx docstrings
+# N[5-9]XX (future use)
+
+DOCSTRING_TRIPLE = ['"""', "'''"]
+VERBOSE_MISSING_IMPORT = False
+_missingImport = set([])
+
+
+def import_normalize(line):
+ # convert "from x import y" to "import x.y"
+ # handle "from x import y as z" to "import x.y as z"
+ split_line = line.split()
+ if (line.startswith("from ") and "," not in line and
+ split_line[2] == "import" and split_line[3] != "*" and
+ split_line[1] != "__future__" and
+ (len(split_line) == 4 or
+ (len(split_line) == 6 and split_line[4] == "as"))):
+ return "import %s.%s" % (split_line[1], split_line[3])
+ else:
+ return line
+
+
+def cloud_import_alphabetical(physical_line, line_number, lines):
+ """Check for imports in alphabetical order.
+
+ HACKING guide recommendation for imports:
+ imports in human alphabetical order
+ N306
+ """
+ # handle import x
+ # use .lower since capitalization shouldn't dictate order
+ split_line = import_normalize(physical_line.strip()).lower().split()
+ split_previous = import_normalize(lines[line_number - 2]
+ ).strip().lower().split()
+ # with or without "as y"
+ length = [2, 4]
+ if (len(split_line) in length and len(split_previous) in length and
+ split_line[0] == "import" and split_previous[0] == "import"):
+ if split_line[1] < split_previous[1]:
+ return (0, "N306: imports not in alphabetical order (%s, %s)"
+ % (split_previous[1], split_line[1]))
+
+
+def cloud_docstring_start_space(physical_line):
+ """Check for docstring not start with space.
+
+ HACKING guide recommendation for docstring:
+ Docstring should not start with space
+ N401
+ """
+ pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
+ if (pos != -1 and len(physical_line) > pos + 1):
+ if (physical_line[pos + 3] == ' '):
+ return (pos, "N401: one line docstring should not start with"
+ " a space")
+
+
+def cloud_todo_format(physical_line):
+ """Check for 'TODO()'.
+
+ HACKING guide recommendation for TODO:
+ Include your name with TODOs as in "#TODO(termie)"
+ N101
+ """
+ pos = physical_line.find('TODO')
+ pos1 = physical_line.find('TODO(')
+ pos2 = physical_line.find('#') # make sure it's a comment
+ if (pos != pos1 and pos2 >= 0 and pos2 < pos):
+ return pos, "N101: Use TODO(NAME)"
+
+
+def cloud_docstring_one_line(physical_line):
+ """Check one line docstring end.
+
+ HACKING guide recommendation for one line docstring:
+ A one line docstring looks like this and ends in a period.
+ N402
+ """
+ pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
+ end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
+ if (pos != -1 and end and len(physical_line) > pos + 4):
+ if (physical_line[-5] != '.'):
+ return pos, "N402: one line docstring needs a period"
+
+
+def cloud_docstring_multiline_end(physical_line):
+ """Check multi line docstring end.
+
+ HACKING guide recommendation for docstring:
+ Docstring should end on a new line
+ N403
+ """
+ pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
+ if (pos != -1 and len(physical_line) == pos):
+ print physical_line
+ if (physical_line[pos + 3] == ' '):
+ return (pos, "N403: multi line docstring end on new line")
+
+
+
+current_file = ""
+
+
+def readlines(filename):
+ """Record the current file being tested."""
+ pep8.current_file = filename
+ return open(filename).readlines()
+
+
+def add_cloud():
+ """Monkey patch pep8 for cloud-init guidelines.
+
+ Look for functions that start with cloud_
+ and add them to pep8 module.
+
+ Assumes you know how to write pep8.py checks
+ """
+ for name, function in globals().items():
+ if not inspect.isfunction(function):
+ continue
+ if name.startswith("cloud_"):
+ exec("pep8.%s = %s" % (name, name))
+
+if __name__ == "__main__":
+ # NOVA based 'hacking.py' error codes start with an N
+ pep8.ERRORCODE_REGEX = re.compile(r'[EWN]\d{3}')
+ add_cloud()
+ pep8.current_file = current_file
+ pep8.readlines = readlines
+ try:
+ pep8._main()
+ finally:
+ if len(_missingImport) > 0:
+ print >> sys.stderr, ("%i imports missing in this test environment"
+ % len(_missingImport))
+
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
new file mode 100755
index 00000000..4548e4ae
--- /dev/null
+++ b/tools/mock-meta.py
@@ -0,0 +1,444 @@
+#!/usr/bin/python
+
+# Provides a somewhat random, somewhat compat, somewhat useful mock version of
+#
+# http://docs.amazonwebservices.com/AWSEC2/2007-08-29/DeveloperGuide/AESDG-chapter-instancedata.html
+
+"""
+To use this to mimic the EC2 metadata service entirely, run it like:
+ # Where 'eth0' is *some* interface.
+ sudo ifconfig eth0:0 169.254.169.254 netmask 255.255.255.255
+
+ sudo ./mock-meta -a 169.254.169.254 -p 80
+
+Then:
+ wget -q http://169.254.169.254/latest/meta-data/instance-id -O -; echo
+ curl --silent http://169.254.169.254/latest/meta-data/instance-id ; echo
+ ec2metadata --instance-id
+"""
+
+import functools
+import httplib
+import json
+import logging
+import os
+import random
+import string
+import sys
+import yaml
+
+from optparse import OptionParser
+
+from BaseHTTPServer import (HTTPServer, BaseHTTPRequestHandler)
+
+log = logging.getLogger('meta-server')
+
+EC2_VERSIONS = [
+ '1.0',
+ '2007-01-19',
+ '2007-03-01',
+ '2007-08-29',
+ '2007-10-10',
+ '2007-12-15',
+ '2008-02-01',
+ '2008-09-01',
+ '2009-04-04',
+]
+
+BLOCK_DEVS = [
+ 'ami',
+ 'ephemeral0',
+ 'root',
+]
+
+DEV_PREFIX = 'v'  # This seems to vary a lot depending on images...
+DEV_MAPPINGS = {
+ 'ephemeral0': '%sda2' % (DEV_PREFIX),
+ 'root': '/dev/%sda1' % (DEV_PREFIX),
+ 'ami': '%sda1' % (DEV_PREFIX),
+ 'swap': '%sda3' % (DEV_PREFIX),
+}
+
+META_CAPABILITIES = [
+ 'aki-id',
+ 'ami-id',
+ 'ami-launch-index',
+ 'ami-manifest-path',
+ 'ari-id',
+ 'block-device-mapping/',
+ 'hostname',
+ 'instance-action',
+ 'instance-id',
+ 'instance-type',
+ 'local-hostname',
+ 'local-ipv4',
+ 'placement/',
+ 'product-codes',
+ 'public-hostname',
+ 'public-ipv4',
+ 'public-keys/',
+ 'reservation-id',
+ 'security-groups'
+]
+
+PUB_KEYS = {
+ 'brickies': [
+ ('ssh-rsa '
+ 'AAAAB3NzaC1yc2EAAAABIwAAAQEA3I7VUf2l5gSn5uavROsc5HRDpZdQueUq5ozemNSj8T'
+ '7enqKHOEaFoU2VoPgGEWC9RyzSQVeyD6s7APMcE82EtmW4skVEgEGSbDc1pvxzxtchBj78'
+ 'hJP6Cf5TCMFSXw+Fz5rF1dR23QDbN1mkHs7adr8GW4kSWqU7Q7NDwfIrJJtO7Hi42GyXtv'
+ 'EONHbiRPOe8stqUly7MvUoN+5kfjBM8Qqpfl2+FNhTYWpMfYdPUnE7u536WqzFmsaqJctz'
+ '3gBxH9Ex7dFtrxR4qiqEr9Qtlu3xGn7Bw07/+i1D+ey3ONkZLN+LQ714cgj8fRS4Hj29SC'
+ 'mXp5Kt5/82cD/VN3NtHw== brickies'),
+ '',
+ ],
+}
+
+INSTANCE_TYPES = [
+ 'm1.large',
+ 'm1.medium',
+ 'm1.small',
+ 'm1.xlarge',
+]
+
+AVAILABILITY_ZONES = [
+ "us-east-1a",
+ "us-east-1b",
+ "us-east-1c",
+ "us-east-1d",
+ 'eu-west-1a',
+ 'eu-west-1b',
+ 'us-west-1',
+]
+
+PLACEMENT_CAPABILITIES = {
+ 'availability-zone': AVAILABILITY_ZONES,
+}
+
+NOT_IMPL_RESPONSE = json.dumps({})
+
+
+class WebException(Exception):
+    """Error carrying an HTTP status code for the error response."""
+
+    def __init__(self, code, msg):
+        Exception.__init__(self, msg)
+        # HTTP status code read by the request handler via e.code.
+        self.code = code
+
+
+def yamlify(data):
+ formatted = yaml.dump(data,
+ line_break="\n",
+ indent=4,
+ explicit_start=True,
+ explicit_end=True,
+ default_flow_style=False)
+ return formatted
+
+
+def format_text(text):
+    """Prefix every line of 'text' with '<< ' for readable log output.
+
+    Empty input yields the bare '<<' marker.
+    """
+    if not len(text):
+        return "<<"
+    lines = text.splitlines()
+    nlines = []
+    for line in lines:
+        nlines.append("<< %s" % line)
+    return "\n".join(nlines)
+
+
+def traverse(keys, mp):
+    """Walk nested mapping 'mp' down the path given by 'keys'.
+
+    Returns the value found, or None when the path leads through a
+    missing key or a non-mapping intermediate value.
+    """
+    result = dict(mp)
+    for k in keys:
+        try:
+            result = result.get(k)
+        except (AttributeError, TypeError):
+            # Current value is not a mapping; the path dead-ends.
+            result = None
+            break
+    return result
+
+
+ID_CHARS = [c for c in (string.ascii_uppercase + string.digits)]
+def id_generator(size=6, lower=False):
+ txt = ''.join(random.choice(ID_CHARS) for x in range(size))
+ if lower:
+ return txt.lower()
+ else:
+ return txt
+
+
+def get_ssh_keys():
+    """Return the mock public keys plus the running user's own key.
+
+    Starts from PUB_KEYS and, when ~/.ssh/id_rsa.pub (or, failing that,
+    ~/.ssh/id_dsa.pub) exists, adds its contents keyed by login name.
+    """
+    keys = {}
+    keys.update(PUB_KEYS)
+
+    # Nice helper to add in the 'running' users key (if they have one)
+    key_pth = os.path.expanduser('~/.ssh/id_rsa.pub')
+    if not os.path.isfile(key_pth):
+        key_pth = os.path.expanduser('~/.ssh/id_dsa.pub')
+
+    if os.path.isfile(key_pth):
+        with open(key_pth, 'rb') as fh:
+            contents = fh.read()
+        # NOTE(review): os.getlogin() can raise OSError when there is no
+        # controlling terminal (cron, daemons) -- TODO confirm.
+        keys[os.getlogin()] = [contents, '']
+
+    return keys
+
+
+class MetaDataHandler(object):
+
+ def __init__(self, opts):
+ self.opts = opts
+ self.instances = {}
+
+ def get_data(self, params, who, **kwargs):
+ if not params:
+ # Show the root level capabilities when
+ # no params are passed...
+ caps = sorted(META_CAPABILITIES)
+ return "\n".join(caps)
+ action = params[0]
+ action = action.lower()
+ if action == 'instance-id':
+ return 'i-%s' % (id_generator(lower=True))
+ elif action == 'ami-launch-index':
+ return "%s" % random.choice([0, 1, 2, 3])
+ elif action == 'aki-id':
+ return 'aki-%s' % (id_generator(lower=True))
+ elif action == 'ami-id':
+ return 'ami-%s' % (id_generator(lower=True))
+ elif action == 'ari-id':
+ return 'ari-%s' % (id_generator(lower=True))
+ elif action == 'block-device-mapping':
+ nparams = params[1:]
+ if not nparams:
+ return "\n".join(BLOCK_DEVS)
+ else:
+ subvalue = traverse(nparams, DEV_MAPPINGS)
+ if not subvalue:
+ return "\n".join(sorted(list(DEV_MAPPINGS.keys())))
+ else:
+ return str(subvalue)
+ elif action in ['hostname', 'local-hostname', 'public-hostname']:
+ # Just echo back there own hostname that they called in on..
+ return "%s" % (who)
+ elif action == 'instance-type':
+ return random.choice(INSTANCE_TYPES)
+ elif action == 'ami-manifest-path':
+ return 'my-amis/spamd-image.manifest.xml'
+ elif action == 'security-groups':
+ return 'default'
+ elif action in ['local-ipv4', 'public-ipv4']:
+ # Just echo back there own ip that they called in on...
+ return "%s" % (kwargs.get('client_ip', '10.0.0.1'))
+ elif action == 'reservation-id':
+ return "r-%s" % (id_generator(lower=True))
+ elif action == 'product-codes':
+ return "%s" % (id_generator(size=8))
+ elif action == 'public-keys':
+ nparams = params[1:]
+ # This is a weird kludge, why amazon why!!!
+ # public-keys is messed up, a list of /latest/meta-data/public-keys/
+ # shows something like: '0=brickies'
+ # but a GET to /latest/meta-data/public-keys/0=brickies will fail
+ # you have to know to get '/latest/meta-data/public-keys/0', then
+ # from there you get a 'openssh-key', which you can get.
+ # this hunk of code just re-works the object for that.
+ avail_keys = get_ssh_keys()
+ key_ids = sorted(list(avail_keys.keys()))
+ if nparams:
+ mybe_key = nparams[0]
+ try:
+ key_id = int(mybe_key)
+ key_name = key_ids[key_id]
+ except:
+ raise WebException(httplib.BAD_REQUEST, "Unknown key id %r" % mybe_key)
+ # Extract the possible sub-params
+ result = traverse(nparams[1:], {
+ "openssh-key": "\n".join(avail_keys[key_name]),
+ })
+ if isinstance(result, (dict)):
+ # TODO: This might not be right??
+ result = "\n".join(sorted(result.keys()))
+ if not result:
+ result = ''
+ return result
+ else:
+ contents = []
+ for (i, key_id) in enumerate(key_ids):
+ contents.append("%s=%s" % (i, key_id))
+ return "\n".join(contents)
+ elif action == 'placement':
+ nparams = params[1:]
+ if not nparams:
+ pcaps = sorted(PLACEMENT_CAPABILITIES.keys())
+ return "\n".join(pcaps)
+ else:
+ pentry = nparams[0].strip().lower()
+ if pentry == 'availability-zone':
+ zones = PLACEMENT_CAPABILITIES[pentry]
+ return "%s" % random.choice(zones)
+ else:
+ return "%s" % (PLACEMENT_CAPABILITIES.get(pentry, ''))
+ else:
+ log.warn(("Did not implement action %s, "
+ "returning empty response: %r"),
+ action, NOT_IMPL_RESPONSE)
+ return NOT_IMPL_RESPONSE
+
+
+class UserDataHandler(object):
+    """Answers mock EC2 user-data queries for the request handler."""
+
+    def __init__(self, opts):
+        self.opts = opts
+
+    def _get_user_blob(self, **kwargs):
+        # Prefer the operator-supplied user-data file *contents* (read
+        # at startup by extract_opts); fall back to a tiny generated
+        # #cloud-config blob carrying the caller's hostname.
+        blob = None
+        if self.opts['user_data_file'] is not None:
+            blob = self.opts['user_data_file']
+        if not blob:
+            blob_mp = {
+                'hostname': kwargs.get('who', 'localhost'),
+            }
+            lines = [
+                "#cloud-config",
+                yamlify(blob_mp),
+            ]
+            blob = "\n".join(lines)
+        return blob.strip()
+
+    def get_data(self, params, who, **kwargs):
+        # Only the bare /user-data endpoint is implemented; sub-paths
+        # get the not-implemented response.
+        if not params:
+            return self._get_user_blob(who=who)
+        return NOT_IMPL_RESPONSE
+
+
+# Seem to need to use globals since can't pass
+# data into the request handlers instances...
+# Puke!
+meta_fetcher = None
+user_fetcher = None
+
+
+class Ec2Handler(BaseHTTPRequestHandler):
+
+ def _get_versions(self):
+ versions = ['latest'] + EC2_VERSIONS
+ versions = sorted(versions)
+ return "\n".join(versions)
+
+ def log_message(self, format, *args):
+ msg = "%s - %s" % (self.address_string(), format % (args))
+ log.info(msg)
+
+ def _find_method(self, path):
+ # Puke! (globals)
+ global meta_fetcher
+ global user_fetcher
+ func_mapping = {
+ 'user-data': user_fetcher.get_data,
+ 'meta-data': meta_fetcher.get_data,
+ }
+ segments = [piece for piece in path.split('/') if len(piece)]
+ log.info("Received segments %s", segments)
+ if not segments:
+ return self._get_versions
+ date = segments[0].strip().lower()
+ if date not in self._get_versions():
+ raise WebException(httplib.BAD_REQUEST, "Unknown version format %r" % date)
+ if len(segments) < 2:
+ raise WebException(httplib.BAD_REQUEST, "No action provided")
+ look_name = segments[1].lower()
+ if look_name not in func_mapping:
+ raise WebException(httplib.BAD_REQUEST, "Unknown requested data %r" % look_name)
+ base_func = func_mapping[look_name]
+ who = self.address_string()
+ ip_from = self.client_address[0]
+ if who == ip_from:
+ # Nothing resolved, so just use 'localhost'
+ who = 'localhost'
+ kwargs = {
+ 'params': list(segments[2:]),
+ 'who': who,
+ 'client_ip': ip_from,
+ }
+ return functools.partial(base_func, **kwargs)
+
+ def _do_response(self):
+ who = self.client_address
+ log.info("Got a call from %s for path %s", who, self.path)
+ try:
+ func = self._find_method(self.path)
+ data = func()
+ if not data:
+ data = ''
+ self.send_response(httplib.OK)
+ self.send_header("Content-Type", "binary/octet-stream")
+ self.send_header("Content-Length", len(data))
+ log.info("Sending data (len=%s):\n%s", len(data), format_text(data))
+ self.end_headers()
+ self.wfile.write(data)
+ except RuntimeError as e:
+ log.exception("Error somewhere in the server.")
+ self.send_error(httplib.INTERNAL_SERVER_ERROR, message=str(e))
+ except WebException as e:
+ code = e.code
+ log.exception(str(e))
+ self.send_error(code, message=str(e))
+
+ def do_GET(self):
+ self._do_response()
+
+ def do_POST(self):
+ self._do_response()
+
+
+def setup_logging(log_level, format='%(levelname)s: @%(name)s : %(message)s'):
+    """Send root-logger output to stdout at 'log_level'.
+
+    NOTE(review): the 'format' parameter shadows the builtin of the same
+    name; kept as-is since callers may pass it by keyword.
+    """
+    root_logger = logging.getLogger()
+    console_logger = logging.StreamHandler(sys.stdout)
+    console_logger.setFormatter(logging.Formatter(format))
+    root_logger.addHandler(console_logger)
+    root_logger.setLevel(log_level)
+
+
+def extract_opts():
+    """Parse CLI options into a dict: port, address, user-data, extras."""
+    parser = OptionParser()
+    parser.add_option("-p", "--port", dest="port", action="store", type=int,
+                      default=80, metavar="PORT",
+                      help="port from which to serve traffic (default: %default)")
+    parser.add_option("-a", "--addr", dest="address", action="store", type=str,
+                      default='0.0.0.0', metavar="ADDRESS",
+                      help="address from which to serve traffic (default: %default)")
+    parser.add_option("-f", '--user-data-file', dest='user_data_file',
+                      action='store', metavar='FILE',
+                      help="user data filename to serve back to incoming requests")
+    (options, args) = parser.parse_args()
+    out = dict()
+    out['extra'] = args
+    out['port'] = options.port
+    out['user_data_file'] = None
+    out['address'] = options.address
+    if options.user_data_file:
+        if not os.path.isfile(options.user_data_file):
+            parser.error("Option -f specified a non-existent file")
+        # Store the file *contents* (not the path) so request handlers
+        # never touch the filesystem.
+        with open(options.user_data_file, 'rb') as fh:
+            out['user_data_file'] = fh.read()
+    return out
+
+
+def setup_fetchers(opts):
+    """Initialize the module-level data handlers shared by all requests."""
+    global meta_fetcher
+    global user_fetcher
+    meta_fetcher = MetaDataHandler(opts)
+    user_fetcher = UserDataHandler(opts)
+
+
+def run_server():
+    """Parse options, wire up logging and fetchers, then serve forever."""
+    # Using global here since it doesn't seem like we
+    # can pass opts into a request handler constructor...
+    opts = extract_opts()
+    setup_logging(logging.DEBUG)
+    setup_fetchers(opts)
+    log.info("CLI opts: %s", opts)
+    server_address = (opts['address'], opts['port'])
+    server = HTTPServer(server_address, Ec2Handler)
+    # Report the *bound* address (useful when port 0 was requested).
+    sa = server.socket.getsockname()
+    log.info("Serving ec2 metadata on %s using port %s ...", sa[0], sa[1])
+    server.serve_forever()
+
+
+if __name__ == '__main__':
+ run_server()
diff --git a/tools/read-dependencies b/tools/read-dependencies
new file mode 100755
index 00000000..72e1e095
--- /dev/null
+++ b/tools/read-dependencies
@@ -0,0 +1,45 @@
+#!/usr/bin/python
+# vi: ts=4 expandtab
+
+import os
+import sys
+import re
+
+
+def parse_requires(fn):
+ requires = []
+ with open(fn, 'r') as fh:
+ lines = fh.read().splitlines()
+ for line in lines:
+ line = line.strip()
+ if not line or line[0] == '#':
+ continue
+ else:
+ requires.append(line)
+ return requires
+
+
+def find_requires(args):
+    """Locate a 'Requires' file.
+
+    Tries, in order: an explicit path in args[0], ../Requires, then
+    ./Requires.  Returns the first existing path, or None.
+    """
+    p_files = []
+    if args:
+        p_files.append(args[0])
+    p_files.append(os.path.join(os.pardir, "Requires"))
+    p_files.append(os.path.join(os.getcwd(), 'Requires'))
+    found = None
+    for fn in p_files:
+        if os.path.isfile(fn):
+            found = fn
+            break
+    return found
+
+
+if __name__ == '__main__':
+    # Print each dependency on its own line; exit 1 when no 'Requires'
+    # file can be located.
+    run_args = sys.argv[1:]
+    fn = find_requires(run_args)
+    if not fn:
+        sys.stderr.write("'Requires' file not found!\n")
+        sys.exit(1)
+    else:
+        deps = parse_requires(fn)
+        for entry in deps:
+            print entry
diff --git a/tools/read-version b/tools/read-version
new file mode 100755
index 00000000..e6167a2c
--- /dev/null
+++ b/tools/read-version
@@ -0,0 +1,70 @@
+#!/usr/bin/python
+# vi: ts=4 expandtab
+
+import os
+import sys
+import re
+
+from distutils import version as ver
+
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
+ sys.argv[0]), os.pardir, os.pardir))
+if os.path.exists(os.path.join(possible_topdir, "cloudinit", "__init__.py")):
+ sys.path.insert(0, possible_topdir)
+
+from cloudinit import version as cver
+
+def parse_versions(fn):
+ with open(fn, 'r') as fh:
+ lines = fh.read().splitlines()
+ versions = []
+ for line in lines:
+ line = line.strip()
+ if line.startswith("-") or not line:
+ continue
+ if not re.match(r"[\d]", line):
+ continue
+ line = line.strip(":")
+ if (re.match(r"^[\d+]\.[\d+]\.[\d+]$", line) or
+ re.match(r"^[\d+]\.[\d+]$", line)):
+ versions.append(line)
+ return versions
+
+def find_changelog(args):
+    """Locate a 'ChangeLog' file.
+
+    Tries, in order: an explicit path in args[0], ../ChangeLog, then
+    ./ChangeLog.  Returns the first existing path, or None.
+    """
+    p_files = []
+    if args:
+        p_files.append(args[0])
+    p_files.append(os.path.join(os.pardir, "ChangeLog"))
+    p_files.append(os.path.join(os.getcwd(), 'ChangeLog'))
+    found = None
+    for fn in p_files:
+        if os.path.isfile(fn):
+            found = fn
+            break
+    return found
+
+
+if __name__ == '__main__':
+    # Print the newest ChangeLog version after checking it matches the
+    # version the code reports; exit non-zero on any mismatch.
+    run_args = sys.argv[1:]
+    fn = find_changelog(run_args)
+    if not fn:
+        sys.stderr.write("'ChangeLog' file not found!\n")
+        sys.exit(1)
+    else:
+        versions = parse_versions(fn)
+        if not versions:
+            sys.stderr.write("No versions found in %s!\n" % (fn))
+            sys.exit(1)
+        else:
+            # Check that the code version is the same
+            # as the version we found!
+            ch_ver = versions[0].strip()
+            code_ver = cver.version()
+            # StrictVersion gives a normalized, numeric comparison
+            # rather than a raw string compare.
+            ch_ver_obj = ver.StrictVersion(ch_ver)
+            if ch_ver_obj != code_ver:
+                sys.stderr.write(("Code version %s does not match"
+                                  " changelog version %s\n") %
+                                 (code_ver, ch_ver_obj))
+                sys.exit(1)
+            sys.stdout.write(ch_ver)
+            sys.exit(0)
diff --git a/tools/run-pep8 b/tools/run-pep8
new file mode 100755
index 00000000..ea46c117
--- /dev/null
+++ b/tools/run-pep8
@@ -0,0 +1,35 @@
+#!/bin/bash
+
+# Run the cloud-init pep8/hacking checks over the default file set, or
+# over the files given as arguments.
+
+ci_files='cloud*.py cloudinit/*.py cloudinit/config/*.py'
+test_files=$(find tests -name "*.py")
+def_files="$ci_files $test_files"
+
+if [ $# -eq 0 ]; then
+    # No arguments: build the default list, failing fast on any glob
+    # entry that did not match an actual file.
+    files=( )
+    for f in $def_files; do
+        [ -f "$f" ] || { echo "failed, $f not a file" 1>&2; exit 1; }
+        files[${#files[@]}]=${f}
+    done
+else
+    files=( "$@" )
+fi
+
+# hacking.py lives next to this script; support running from either the
+# tree root or the tools directory.  $(...) replaces legacy backticks,
+# and base carries no trailing slash so the join below is clean.
+if [ -f 'hacking.py' ]; then
+    base=$(pwd)
+else
+    base=$(pwd)/tools
+fi
+
+cmd=(
+    "${base}/hacking.py"
+
+    --ignore=E501  # Line too long (these are caught by pylint)
+
+    "${files[@]}"
+)
+
+echo -e "\nRunning 'cloudinit' pep8:"
+echo "${cmd[@]}"
+"${cmd[@]}"
+
diff --git a/tools/run-pylint b/tools/run-pylint
index 46748ffb..dd6369aa 100755
--- a/tools/run-pylint
+++ b/tools/run-pylint
@@ -1,6 +1,6 @@
#!/bin/bash
-ci_files='cloud*.py cloudinit/*.py cloudinit/CloudConfig/*.py'
+ci_files='cloud*.py cloudinit/*.py cloudinit/config/*.py'
test_files=$(find tests -name "*.py")
def_files="$ci_files $test_files"
@@ -38,14 +38,3 @@ echo -e "\nRunning pylint:"
echo "${cmd[@]}"
"${cmd[@]}"
-cmd=(
- pep8
-
- --ignore=E501 # Line too long (these are caught by pylint above)
-
- "${files[@]}"
-)
-
-echo -e "\nRunning pep8:"
-echo "${cmd[@]}"
-"${cmd[@]}"
diff --git a/upstart/cloud-config.conf b/upstart/cloud-config.conf
index 5edc58b9..3ac113f3 100644
--- a/upstart/cloud-config.conf
+++ b/upstart/cloud-config.conf
@@ -5,4 +5,4 @@ start on (filesystem and started rsyslog)
console output
task
-exec cloud-init-cfg all config
+exec cloud-init modules --mode=config
diff --git a/upstart/cloud-final.conf b/upstart/cloud-final.conf
index a04105a1..72ae5052 100644
--- a/upstart/cloud-final.conf
+++ b/upstart/cloud-final.conf
@@ -7,4 +7,4 @@ start on (stopped rc RUNLEVEL=[2345] and stopped cloud-config)
console output
task
-exec cloud-init-cfg all final
+exec cloud-init modules --mode=final
diff --git a/upstart/cloud-init-local.conf b/upstart/cloud-init-local.conf
index b6eb21b4..061fe406 100644
--- a/upstart/cloud-init-local.conf
+++ b/upstart/cloud-init-local.conf
@@ -6,4 +6,4 @@ task
console output
-exec /usr/bin/cloud-init start-local
+exec /usr/bin/cloud-init init --local
diff --git a/upstart/cloud-init.conf b/upstart/cloud-init.conf
index b9be5981..41ddd284 100644
--- a/upstart/cloud-init.conf
+++ b/upstart/cloud-init.conf
@@ -6,4 +6,4 @@ task
console output
-exec /usr/bin/cloud-init start
+exec /usr/bin/cloud-init init