author      Scott Moser <smoser@ubuntu.com>    2014-02-13 06:27:22 -0500
committer   Scott Moser <smoser@ubuntu.com>    2014-02-13 06:27:22 -0500
commit      c722a6e9110b8a5dc33de8911cf5734e14dc523f (patch)
tree        54acbaf93371fc42f2777f68e79768e45f47a0a7
parent      5788cd903f6e4a9bab2ad32e9c1d2eb13b485ac3 (diff)
parent      f7ac086a434b511b076346839818de7cf34e18a2 (diff)
merge from trunk
-rw-r--r--  ChangeLog | 5
-rw-r--r--  cloudinit/config/cc_set_passwords.py | 5
-rw-r--r--  cloudinit/config/cc_ssh_import_id.py | 3
-rw-r--r--  cloudinit/cs_utils.py | 99
-rw-r--r--  cloudinit/distros/__init__.py | 5
-rw-r--r--  cloudinit/distros/arch.py | 219
-rw-r--r--  cloudinit/distros/gentoo.py | 178
-rw-r--r--  cloudinit/settings.py | 1
-rw-r--r--  cloudinit/sources/DataSourceAzure.py | 37
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py | 91
-rw-r--r--  cloudinit/sources/DataSourceEc2.py | 7
-rw-r--r--  cloudinit/stages.py | 36
-rw-r--r--  doc/examples/cloud-config-landscape.txt | 8
-rw-r--r--  doc/examples/cloud-config.txt | 10
-rw-r--r--  doc/sources/cloudsigma/README.rst | 34
-rw-r--r--  packages/redhat/cloud-init.spec.in | 5
-rw-r--r--  packages/suse/cloud-init.spec.in | 2
-rw-r--r--  requirements.txt | 4
-rw-r--r--  sysvinit/gentoo/cloud-config | 13
-rw-r--r--  sysvinit/gentoo/cloud-final | 11
-rw-r--r--  sysvinit/gentoo/cloud-init | 12
-rw-r--r--  sysvinit/gentoo/cloud-init-local | 13
-rw-r--r--  tests/unittests/test_cs_util.py | 65
-rw-r--r--  tests/unittests/test_datasource/test_azure.py | 83
-rw-r--r--  tests/unittests/test_datasource/test_cloudsigma.py | 59
25 files changed, 958 insertions, 47 deletions
diff --git a/ChangeLog b/ChangeLog
index 6c8fe90a..af53a735 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -23,6 +23,11 @@
the correct filesystem label. [Paul Querna]
- initial freebsd support [Harm Weites]
- fix in is_ipv4 to accept IP addresses with a '0' in them.
+ - Azure: fix issue when stale data in /var/lib/waagent (LP: #1269626)
+ - skip config_modules that declare themselves only verified on a set of
+ distros. Add them to 'unverified_modules' list to run anyway.
+ - Add CloudSigma datasource [Kiril Vladimiroff]
+ - Add initial support for Gentoo and Arch distributions [Nate House]
0.7.4:
- fix issue mounting 'ephemeral0' if ephemeral0 was an alias for a
partitioned block device with target filesystem on ephemeral0.1.
diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 56a36906..4a3b21af 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -136,9 +136,12 @@ def handle(_name, cfg, cloud, log, args):
util.write_file(ssh_util.DEF_SSHD_CFG, "\n".join(lines))
try:
- cmd = ['service']
+ cmd = cloud.distro.init_cmd # Default service
cmd.append(cloud.distro.get_option('ssh_svcname', 'ssh'))
cmd.append('restart')
+ if 'systemctl' in cmd: # Switch action ordering
+ cmd[1], cmd[2] = cmd[2], cmd[1]
+ cmd = filter(None, cmd) # Remove empty arguments
util.subp(cmd)
log.debug("Restarted the ssh daemon")
except:
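
The hunk above builds the sshd restart command from the distro's init_cmd instead of a hard-coded 'service'. A minimal sketch (hypothetical helper, not part of the patch) of the resulting argument ordering for the three init styles touched by this merge:

    def build_restart_cmd(init_cmd, svcname='ssh'):
        # init_cmd comes from the Distro class, e.g. ['service'], ['systemctl'] or ['']
        cmd = list(init_cmd)
        cmd.append(svcname)
        cmd.append('restart')
        if 'systemctl' in cmd:
            # systemctl expects 'systemctl restart <unit>', so swap action and unit
            cmd[1], cmd[2] = cmd[2], cmd[1]
        return [arg for arg in cmd if arg]  # drop empty arguments (Gentoo uses [''])

    assert build_restart_cmd(['service']) == ['service', 'ssh', 'restart']
    assert build_restart_cmd(['systemctl']) == ['systemctl', 'restart', 'ssh']
    assert build_restart_cmd([''], '/etc/init.d/sshd') == ['/etc/init.d/sshd', 'restart']
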
diff --git a/cloudinit/config/cc_ssh_import_id.py b/cloudinit/config/cc_ssh_import_id.py
index 50d96e15..76c1663d 100644
--- a/cloudinit/config/cc_ssh_import_id.py
+++ b/cloudinit/config/cc_ssh_import_id.py
@@ -26,9 +26,8 @@ from cloudinit import distros as ds
from cloudinit import util
import pwd
-# The ssh-import-id only seems to exist on ubuntu (for now)
# https://launchpad.net/ssh-import-id
-distros = ['ubuntu']
+distros = ['ubuntu', 'debian']
def handle(_name, cfg, cloud, log, args):
diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py
new file mode 100644
index 00000000..4e53c31a
--- /dev/null
+++ b/cloudinit/cs_utils.py
@@ -0,0 +1,99 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2014 CloudSigma
+#
+# Author: Kiril Vladimiroff <kiril.vladimiroff@cloudsigma.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+cepko implements easy-to-use communication with CloudSigma's VMs through
+a virtual serial port, without having to format the messages properly or
+parse the output with the specific and sometimes confusing shell tools
+otherwise needed for that purpose.
+
+Having the server definition accessible by the VM can be useful in various
+ways. For example, it is possible to easily determine from within the VM
+which network interfaces are connected to the public and which to the private network.
+Another use is to pass some data to initial VM setup scripts, like setting the
+hostname to the VM name or passing ssh public keys through server meta.
+
+For more information take a look at the Server Context section of CloudSigma
+API Docs: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
+"""
+import json
+import platform
+
+import serial
+
+SERIAL_PORT = '/dev/ttyS1'
+if platform.system() == 'Windows':
+ SERIAL_PORT = 'COM2'
+
+
+class Cepko(object):
+ """
+ One instance of this object can be used for one or more
+ queries to the serial port.
+ """
+ request_pattern = "<\n{}\n>"
+
+ def get(self, key="", request_pattern=None):
+ if request_pattern is None:
+ request_pattern = self.request_pattern
+ return CepkoResult(request_pattern.format(key))
+
+ def all(self):
+ return self.get()
+
+ def meta(self, key=""):
+ request_pattern = self.request_pattern.format("/meta/{}")
+ return self.get(key, request_pattern)
+
+ def global_context(self, key=""):
+ request_pattern = self.request_pattern.format("/global_context/{}")
+ return self.get(key, request_pattern)
+
+
+class CepkoResult(object):
+ """
+ CepkoResult executes the request to the virtual serial port as soon
+ as the instance is initialized and stores the result in both raw and
+ marshalled format.
+ """
+ def __init__(self, request):
+ self.request = request
+ self.raw_result = self._execute()
+ self.result = self._marshal(self.raw_result)
+
+ def _execute(self):
+ connection = serial.Serial(SERIAL_PORT)
+ connection.write(self.request)
+ return connection.readline().strip('\x04\n')
+
+ def _marshal(self, raw_result):
+ try:
+ return json.loads(raw_result)
+ except ValueError:
+ return raw_result
+
+ def __len__(self):
+ return self.result.__len__()
+
+ def __getitem__(self, key):
+ return self.result.__getitem__(key)
+
+ def __contains__(self, item):
+ return self.result.__contains__(item)
+
+ def __iter__(self):
+ return self.result.__iter__()
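
A minimal usage sketch for the Cepko helper added above, assuming it runs inside a CloudSigma guest where the virtual serial port is available (illustrative only, not part of the patch):

    from cloudinit.cs_utils import Cepko

    cepko = Cepko()
    context = cepko.all()      # CepkoResult wrapping the full server context
    print(context['name'])     # e.g. the server's name
    meta = cepko.meta()        # only the /meta/ section of the context
    if 'ssh_public_key' in meta:
        print(meta['ssh_public_key'])
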
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 46b67fa3..55d6bcbc 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -39,8 +39,10 @@ from cloudinit.distros.parsers import hosts
OSFAMILIES = {
'debian': ['debian', 'ubuntu'],
'redhat': ['fedora', 'rhel'],
+ 'gentoo': ['gentoo'],
'freebsd': ['freebsd'],
- 'suse': ['sles']
+ 'suse': ['sles'],
+ 'arch': ['arch'],
}
LOG = logging.getLogger(__name__)
@@ -53,6 +55,7 @@ class Distro(object):
ci_sudoers_fn = "/etc/sudoers.d/90-cloud-init-users"
hostname_conf_fn = "/etc/hostname"
tz_zone_dir = "/usr/share/zoneinfo"
+ init_cmd = ['service'] # systemctl, service etc
def __init__(self, name, cfg, paths):
self._paths = paths
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
new file mode 100644
index 00000000..310c3dff
--- /dev/null
+++ b/cloudinit/distros/arch.py
@@ -0,0 +1,219 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2014 Rackspace, US Inc.
+#
+# Author: Nate House <nathan.house@rackspace.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.distros import net_util
+from cloudinit.distros.parsers.hostname import HostnameConf
+
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(distros.Distro):
+ locale_conf_fn = "/etc/locale.gen"
+ network_conf_dir = "/etc/netctl"
+ tz_conf_fn = "/etc/timezone"
+ tz_local_fn = "/etc/localtime"
+ resolve_conf_fn = "/etc/resolv.conf"
+ init_cmd = ['systemctl'] # init scripts
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+ # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ self.osfamily = 'arch'
+ cfg['ssh_svcname'] = 'sshd'
+
+ def apply_locale(self, locale, out_fn=None):
+ if not out_fn:
+ out_fn = self.locale_conf_fn
+ util.subp(['locale-gen', '-G', locale], capture=False)
+ # "" provides trailing newline during join
+ lines = [
+ util.make_header(),
+ 'LANG="%s"' % (locale),
+ "",
+ ]
+ util.write_file(out_fn, "\n".join(lines))
+
+ def install_packages(self, pkglist):
+ self.update_package_sources()
+ self.package_command('', pkgs=pkglist)
+
+ def _write_network(self, settings):
+ entries = net_util.translate_network(settings)
+ LOG.debug("Translated ubuntu style network settings %s into %s",
+ settings, entries)
+ dev_names = entries.keys()
+ # Format for netctl
+ for (dev, info) in entries.iteritems():
+ nameservers = []
+ net_fn = self.network_conf_dir + '/' + dev
+ net_cfg = {
+ 'Connection': 'ethernet',
+ 'Interface': dev,
+ 'IP': info.get('bootproto'),
+ 'Address': "('%s/%s')" % (info.get('address'),
+ info.get('netmask')),
+ 'Gateway': info.get('gateway'),
+ 'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '')
+ }
+ util.write_file(net_fn, convert_netctl(net_cfg))
+ if info.get('auto'):
+ self._enable_interface(dev)
+ if 'dns-nameservers' in info:
+ nameservers.extend(info['dns-nameservers'])
+
+ if nameservers:
+ util.write_file(self.resolve_conf_fn,
+ convert_resolv_conf(nameservers))
+
+ return dev_names
+
+ def _enable_interface(self, device_name):
+ cmd = ['netctl', 'reenable', device_name]
+ try:
+ (_out, err) = util.subp(cmd)
+ if len(err):
+ LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
+ except util.ProcessExecutionError:
+ util.logexc(LOG, "Running interface command %s failed", cmd)
+
+ def _bring_up_interface(self, device_name):
+ cmd = ['netctl', 'restart', device_name]
+ LOG.debug("Attempting to run bring up interface %s using command %s",
+ device_name, cmd)
+ try:
+ (_out, err) = util.subp(cmd)
+ if len(err):
+ LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
+ return True
+ except util.ProcessExecutionError:
+ util.logexc(LOG, "Running interface command %s failed", cmd)
+ return False
+
+ def _bring_up_interfaces(self, device_names):
+ for d in device_names:
+ if not self._bring_up_interface(d):
+ return False
+ return True
+
+ def _select_hostname(self, hostname, fqdn):
+ # Prefer the short hostname over the long
+ # fully qualified domain name
+ if not hostname:
+ return fqdn
+ return hostname
+
+ def _write_hostname(self, your_hostname, out_fn):
+ conf = None
+ try:
+ # Try to update the previous one
+ # so lets see if we can read it first.
+ conf = self._read_hostname_conf(out_fn)
+ except IOError:
+ pass
+ if not conf:
+ conf = HostnameConf('')
+ conf.set_hostname(your_hostname)
+ util.write_file(out_fn, str(conf), 0644)
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname(self.hostname_conf_fn)
+ return (self.hostname_conf_fn, sys_hostname)
+
+ def _read_hostname_conf(self, filename):
+ conf = HostnameConf(util.load_file(filename))
+ conf.parse()
+ return conf
+
+ def _read_hostname(self, filename, default=None):
+ hostname = None
+ try:
+ conf = self._read_hostname_conf(filename)
+ hostname = conf.hostname
+ except IOError:
+ pass
+ if not hostname:
+ return default
+ return hostname
+
+ def set_timezone(self, tz):
+ tz_file = self._find_tz_file(tz)
+ # Note: "" provides trailing newline during join
+ tz_lines = [
+ util.make_header(),
+ str(tz),
+ "",
+ ]
+ util.write_file(self.tz_conf_fn, "\n".join(tz_lines))
+ # This ensures that the correct tz will be used for the system
+ util.copy(tz_file, self.tz_local_fn)
+
+ def package_command(self, command, args=None, pkgs=None):
+ if pkgs is None:
+ pkgs = []
+
+ cmd = ['pacman']
+ # Redirect output
+ cmd.append("-Sy")
+ cmd.append("--quiet")
+ cmd.append("--noconfirm")
+
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
+
+ if command:
+ cmd.append(command)
+
+ pkglist = util.expand_package_list('%s-%s', pkgs)
+ cmd.extend(pkglist)
+
+ # Allow the output of this to flow outwards (ie not be captured)
+ util.subp(cmd, capture=False)
+
+ def update_package_sources(self):
+ self._runner.run("update-sources", self.package_command,
+ ["-y"], freq=PER_INSTANCE)
+
+
+def convert_netctl(settings):
+ """Returns a settings string formatted for netctl."""
+ result = ''
+ if isinstance(settings, dict):
+ for k, v in settings.items():
+ result = result + '%s=%s\n' % (k, v)
+ return result
+
+
+def convert_resolv_conf(settings):
+ """Returns a settings string formatted for resolv.conf."""
+ result = ''
+ if isinstance(settings, list):
+ for ns in settings:
+ result = result + 'nameserver %s\n' % ns
+ return result
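
For illustration, convert_netctl() above flattens a profile dict into KEY=VALUE lines for a netctl unit file; a small sketch with made-up values (not from the patch):

    from cloudinit.distros.arch import convert_netctl

    profile = {
        'Connection': 'ethernet',
        'Interface': 'eth0',
        'IP': 'static',
        'Address': "('192.0.2.10/24')",
        'Gateway': '192.0.2.1',
    }
    # Produces one KEY=VALUE line per entry, e.g. "Interface=eth0";
    # line order follows the dict's iteration order.
    print(convert_netctl(profile))
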
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
new file mode 100644
index 00000000..09f8d8ea
--- /dev/null
+++ b/cloudinit/distros/gentoo.py
@@ -0,0 +1,178 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2014 Rackspace, US Inc.
+#
+# Author: Nate House <nathan.house@rackspace.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import distros
+from cloudinit import helpers
+from cloudinit import log as logging
+from cloudinit import util
+
+from cloudinit.distros.parsers.hostname import HostnameConf
+
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+
+class Distro(distros.Distro):
+ locale_conf_fn = "/etc/locale.gen"
+ network_conf_fn = "/etc/conf.d/net"
+ tz_conf_fn = "/etc/timezone"
+ tz_local_fn = "/etc/localtime"
+ init_cmd = [''] # init scripts
+
+ def __init__(self, name, cfg, paths):
+ distros.Distro.__init__(self, name, cfg, paths)
+ # This will be used to restrict certain
+ # calls from repeatedly happening (when they
+ # should only happen say once per instance...)
+ self._runner = helpers.Runners(paths)
+ self.osfamily = 'gentoo'
+ # Fix sshd restarts
+ cfg['ssh_svcname'] = '/etc/init.d/sshd'
+
+ def apply_locale(self, locale, out_fn=None):
+ if not out_fn:
+ out_fn = self.locale_conf_fn
+ util.subp(['locale-gen', '-G', locale], capture=False)
+ # "" provides trailing newline during join
+ lines = [
+ util.make_header(),
+ 'LANG="%s"' % (locale),
+ "",
+ ]
+ util.write_file(out_fn, "\n".join(lines))
+
+ def install_packages(self, pkglist):
+ self.update_package_sources()
+ self.package_command('', pkgs=pkglist)
+
+ def _write_network(self, settings):
+ util.write_file(self.network_conf_fn, settings)
+ return ['all']
+
+ def _bring_up_interface(self, device_name):
+ cmd = ['/etc/init.d/net.%s' % device_name, 'restart']
+ LOG.debug("Attempting to run bring up interface %s using command %s",
+ device_name, cmd)
+ try:
+ (_out, err) = util.subp(cmd)
+ if len(err):
+ LOG.warn("Running %s resulted in stderr output: %s", cmd, err)
+ return True
+ except util.ProcessExecutionError:
+ util.logexc(LOG, "Running interface command %s failed", cmd)
+ return False
+
+ def _bring_up_interfaces(self, device_names):
+ use_all = False
+ for d in device_names:
+ if d == 'all':
+ use_all = True
+ if use_all:
+ # Grab device names from init scripts
+ cmd = ['ls', '/etc/init.d/net.*']
+ try:
+ (_out, err) = util.subp(cmd)
+ if len(err):
+ LOG.warn("Running %s resulted in stderr output: %s", cmd,
+ err)
+ except util.ProcessExecutionError:
+ util.logexc(LOG, "Running interface command %s failed", cmd)
+ return False
+ devices = [x.split('.')[2] for x in _out.split(' ')]
+ return distros.Distro._bring_up_interfaces(self, devices)
+ else:
+ return distros.Distro._bring_up_interfaces(self, device_names)
+
+ def _select_hostname(self, hostname, fqdn):
+ # Prefer the short hostname over the long
+ # fully qualified domain name
+ if not hostname:
+ return fqdn
+ return hostname
+
+ def _write_hostname(self, your_hostname, out_fn):
+ conf = None
+ try:
+ # Try to update the previous one
+ # so lets see if we can read it first.
+ conf = self._read_hostname_conf(out_fn)
+ except IOError:
+ pass
+ if not conf:
+ conf = HostnameConf('')
+ conf.set_hostname(your_hostname)
+ util.write_file(out_fn, str(conf), 0644)
+
+ def _read_system_hostname(self):
+ sys_hostname = self._read_hostname(self.hostname_conf_fn)
+ return (self.hostname_conf_fn, sys_hostname)
+
+ def _read_hostname_conf(self, filename):
+ conf = HostnameConf(util.load_file(filename))
+ conf.parse()
+ return conf
+
+ def _read_hostname(self, filename, default=None):
+ hostname = None
+ try:
+ conf = self._read_hostname_conf(filename)
+ hostname = conf.hostname
+ except IOError:
+ pass
+ if not hostname:
+ return default
+ return hostname
+
+ def set_timezone(self, tz):
+ tz_file = self._find_tz_file(tz)
+ # Note: "" provides trailing newline during join
+ tz_lines = [
+ util.make_header(),
+ str(tz),
+ "",
+ ]
+ util.write_file(self.tz_conf_fn, "\n".join(tz_lines))
+ # This ensures that the correct tz will be used for the system
+ util.copy(tz_file, self.tz_local_fn)
+
+ def package_command(self, command, args=None, pkgs=None):
+ if pkgs is None:
+ pkgs = []
+
+ cmd = ['emerge']
+ # Redirect output
+ cmd.append("--quiet")
+
+ if args and isinstance(args, str):
+ cmd.append(args)
+ elif args and isinstance(args, list):
+ cmd.extend(args)
+
+ if command:
+ cmd.append(command)
+
+ pkglist = util.expand_package_list('%s-%s', pkgs)
+ cmd.extend(pkglist)
+
+ # Allow the output of this to flow outwards (ie not be captured)
+ util.subp(cmd, capture=False)
+
+ def update_package_sources(self):
+ self._runner.run("update-sources", self.package_command,
+ ["-u", "world"], freq=PER_INSTANCE)
diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 7be2199a..7b0b18e7 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -37,6 +37,7 @@ CFG_BUILTIN = {
'OVF',
'MAAS',
'Ec2',
+ 'CloudSigma',
'CloudStack',
'SmartOS',
# At the end to act as a 'catch' when none of the above work...
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 97f151d6..c7331da5 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -34,6 +34,7 @@ DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
AGENT_START = ['service', 'walinuxagent', 'start']
BOUNCE_COMMAND = ['sh', '-xc',
"i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]
+DATA_DIR_CLEAN_LIST = ['SharedConfig.xml']
BUILTIN_DS_CONFIG = {
'agent_command': AGENT_START,
@@ -101,7 +102,7 @@ class DataSourceAzureNet(sources.DataSource):
except BrokenAzureDataSource as exc:
raise exc
except util.MountFailedError:
- LOG.warn("%s was not mountable" % cdev)
+ LOG.warn("%s was not mountable", cdev)
continue
(md, self.userdata_raw, cfg, files) = ret
@@ -128,10 +129,26 @@ class DataSourceAzureNet(sources.DataSource):
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
mycfg = self.ds_cfg
+ ddir = mycfg['data_dir']
+
+ if found != ddir:
+ cached_ovfenv = util.load_file(
+ os.path.join(ddir, 'ovf-env.xml'), quiet=True)
+ if cached_ovfenv != files['ovf-env.xml']:
+ # source was not walinux-agent's datadir, so we have to clean
+ # up so 'wait_for_files' doesn't return early due to stale data
+ cleaned = []
+ for f in [os.path.join(ddir, f) for f in DATA_DIR_CLEAN_LIST]:
+ if os.path.exists(f):
+ util.del_file(f)
+ cleaned.append(f)
+ if cleaned:
+ LOG.info("removed stale file(s) in '%s': %s",
+ ddir, str(cleaned))
# walinux agent writes files world readable, but expects
# the directory to be protected.
- write_files(mycfg['data_dir'], files, dirmode=0700)
+ write_files(ddir, files, dirmode=0700)
# handle the hostname 'publishing'
try:
@@ -139,7 +156,7 @@ class DataSourceAzureNet(sources.DataSource):
self.metadata.get('local-hostname'),
mycfg['hostname_bounce'])
except Exception as e:
- LOG.warn("Failed publishing hostname: %s" % e)
+ LOG.warn("Failed publishing hostname: %s", e)
util.logexc(LOG, "handling set_hostname failed")
try:
@@ -149,13 +166,13 @@ class DataSourceAzureNet(sources.DataSource):
util.logexc(LOG, "agent command '%s' failed.",
mycfg['agent_command'])
- shcfgxml = os.path.join(mycfg['data_dir'], "SharedConfig.xml")
+ shcfgxml = os.path.join(ddir, "SharedConfig.xml")
wait_for = [shcfgxml]
fp_files = []
for pk in self.cfg.get('_pubkeys', []):
bname = str(pk['fingerprint'] + ".crt")
- fp_files += [os.path.join(mycfg['data_dir'], bname)]
+ fp_files += [os.path.join(ddir, bname)]
missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
func=wait_for_files,
@@ -169,7 +186,7 @@ class DataSourceAzureNet(sources.DataSource):
try:
self.metadata['instance-id'] = iid_from_shared_config(shcfgxml)
except ValueError as e:
- LOG.warn("failed to get instance id in %s: %s" % (shcfgxml, e))
+ LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)
pubkeys = pubkeys_from_crt_files(fp_files)
@@ -250,7 +267,7 @@ def pubkeys_from_crt_files(flist):
errors.append(fname)
if errors:
- LOG.warn("failed to convert the crt files to pubkey: %s" % errors)
+ LOG.warn("failed to convert the crt files to pubkey: %s", errors)
return pubkeys
@@ -281,7 +298,7 @@ def write_files(datadir, files, dirmode=None):
def invoke_agent(cmd):
# this is a function itself to simplify patching it for test
if cmd:
- LOG.debug("invoking agent: %s" % cmd)
+ LOG.debug("invoking agent: %s", cmd)
util.subp(cmd, shell=(not isinstance(cmd, list)))
else:
LOG.debug("not invoking agent")
@@ -328,7 +345,7 @@ def load_azure_ovf_pubkeys(sshnode):
continue
cur = {'fingerprint': "", 'path': ""}
for child in pk_node.childNodes:
- if (child.nodeType == text_node or not child.localName):
+ if child.nodeType == text_node or not child.localName:
continue
name = child.localName.lower()
@@ -414,7 +431,7 @@ def read_azure_ovf(contents):
# we accept either UserData or CustomData. If both are present
# then behavior is undefined.
- if (name == "userdata" or name == "customdata"):
+ if name == "userdata" or name == "customdata":
if attrs.get('encoding') in (None, "base64"):
ud = base64.b64decode(''.join(value.split()))
else:
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
new file mode 100644
index 00000000..78acd8a4
--- /dev/null
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -0,0 +1,91 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2014 CloudSigma
+#
+# Author: Kiril Vladimiroff <kiril.vladimiroff@cloudsigma.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import re
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit.cs_utils import Cepko
+
+LOG = logging.getLogger(__name__)
+
+VALID_DSMODES = ("local", "net", "disabled")
+
+
+class DataSourceCloudSigma(sources.DataSource):
+ """
+ Uses cepko in order to gather the server context from the VM.
+
+ For more information about CloudSigma's Server Context:
+ http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
+ """
+ def __init__(self, sys_cfg, distro, paths):
+ self.dsmode = 'local'
+ self.cepko = Cepko()
+ self.ssh_public_key = ''
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+
+ def get_data(self):
+ """
+ Metadata is the whole server context and the meta field
+ 'cloudinit-user-data' is used as userdata.
+ """
+ try:
+ server_context = self.cepko.all().result
+ server_meta = server_context['meta']
+ self.userdata_raw = server_meta.get('cloudinit-user-data', "")
+ self.metadata = server_context
+ self.ssh_public_key = server_meta['ssh_public_key']
+
+ if server_meta.get('cloudinit-dsmode') in VALID_DSMODES:
+ self.dsmode = server_meta['cloudinit-dsmode']
+ except:
+ util.logexc(LOG, "Failed reading from the serial port")
+ return False
+ return True
+
+ def get_hostname(self, fqdn=False, resolve_ip=False):
+ """
+ Cleans up and uses the server's name if the latter is set. Otherwise
+ the first part of the uuid is used.
+ """
+ if re.match(r'^[A-Za-z0-9 -_\.]+$', self.metadata['name']):
+ return self.metadata['name'][:61]
+ else:
+ return self.metadata['uuid'].split('-')[0]
+
+ def get_public_ssh_keys(self):
+ return [self.ssh_public_key]
+
+ def get_instance_id(self):
+ return self.metadata['uuid']
+
+
+# Used to match classes to dependencies. Since this datasource uses the serial
+# port, a network is not really required, so it's okay to load without it, too.
+datasources = [
+ (DataSourceCloudSigma, (sources.DEP_FILESYSTEM,)),
+ (DataSourceCloudSigma, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+def get_datasource_list(depends):
+ """
+ Return a list of data sources that match this set of dependencies
+ """
+ return sources.list_from_depends(depends, datasources)
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index f010e640..1b20ecf3 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -92,12 +92,9 @@ class DataSourceEc2(sources.DataSource):
except Exception:
util.logexc(LOG, "Failed to get max wait. using %s", max_wait)
- if max_wait == 0:
- return False
-
timeout = 50
try:
- timeout = int(mcfg.get("timeout", timeout))
+ timeout = max(0, int(mcfg.get("timeout", timeout)))
except Exception:
util.logexc(LOG, "Failed to get timeout, using %s", timeout)
@@ -109,6 +106,8 @@ class DataSourceEc2(sources.DataSource):
mcfg = {}
(max_wait, timeout) = self._get_url_settings()
+ if max_wait <= 0:
+ return False
# Remove addresses from the list that wont resolve.
mdurls = mcfg.get("metadata_urls", DEF_MD_URLS)
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 593b72a2..7acd3355 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -632,7 +632,6 @@ class Modules(object):
return mostly_mods
def _run_modules(self, mostly_mods):
- d_name = self.init.distro.name
cc = self.init.cloudify()
# Return which ones ran
# and which ones failed + the exception of why it failed
@@ -646,15 +645,6 @@ class Modules(object):
if not freq in FREQUENCIES:
freq = PER_INSTANCE
- worked_distros = set(mod.distros)
- worked_distros.update(
- distros.Distro.expand_osfamily(mod.osfamilies))
-
- if (worked_distros and d_name not in worked_distros):
- LOG.warn(("Module %s is verified on %s distros"
- " but not on %s distro. It may or may not work"
- " correctly."), name, list(worked_distros),
- d_name)
# Use the configs logger and not our own
# TODO(harlowja): possibly check the module
# for having a LOG attr and just give it back
@@ -686,6 +676,32 @@ class Modules(object):
def run_section(self, section_name):
raw_mods = self._read_modules(section_name)
mostly_mods = self._fixup_modules(raw_mods)
+ d_name = self.init.distro.name
+
+ skipped = []
+ forced = []
+ overridden = self.cfg.get('unverified_modules', [])
+ for (mod, name, _freq, _args) in mostly_mods:
+ worked_distros = set(mod.distros)
+ worked_distros.update(
+ distros.Distro.expand_osfamily(mod.osfamilies))
+
+ # module does not declare 'distros' or lists this distro
+ if not worked_distros or d_name in worked_distros:
+ continue
+
+ if name in overridden:
+ forced.append(name)
+ else:
+ skipped.append(name)
+
+ if skipped:
+ LOG.info("Skipping modules %s because they are not verified "
+ "on distro '%s'. To run anyway, add them to "
+ "'unverified_modules' in config.", skipped, d_name)
+ if forced:
+ LOG.info("running unverified_modules: %s", forced)
+
return self._run_modules(mostly_mods)
diff --git a/doc/examples/cloud-config-landscape.txt b/doc/examples/cloud-config-landscape.txt
index 74e07b62..d7ff8ef8 100644
--- a/doc/examples/cloud-config-landscape.txt
+++ b/doc/examples/cloud-config-landscape.txt
@@ -16,7 +16,7 @@ landscape:
data_path: "/var/lib/landscape/client"
http_proxy: "http://my.proxy.com/foobar"
tags: "server,cloud"
- computer_title = footitle
- https_proxy = fooproxy
- registration_key = fookey
- account_name = fooaccount
+ computer_title: footitle
+ https_proxy: fooproxy
+ registration_key: fookey
+ account_name: fooaccount
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 61fa6065..ed4eb7fc 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -319,6 +319,16 @@ cloud_config_modules:
- runcmd
- byobu
+# unverified_modules: []
+# If a config module declares a set of distros as supported, it will be
+# skipped when running on a different distro. To override this sanity check,
+# provide a list of modules that should be run anyway in 'unverified_modules'.
+# The default is an empty list (i.e., trust the modules' distro declarations).
+#
+# Example:
+# unverified_modules: ['apt-update-upgrade']
+# default: []
+
# ssh_import_id: [ user1, user2 ]
# ssh_import_id will feed the list in that variable to
# ssh-import-id, so that public keys stored in launchpad
diff --git a/doc/sources/cloudsigma/README.rst b/doc/sources/cloudsigma/README.rst
new file mode 100644
index 00000000..8cb2b0fe
--- /dev/null
+++ b/doc/sources/cloudsigma/README.rst
@@ -0,0 +1,34 @@
+=====================
+CloudSigma Datasource
+=====================
+
+This datasource finds metadata and user-data on the `CloudSigma`_ cloud platform.
+Data transfer occurs through a virtual serial port of the `CloudSigma`_ VM, so the
+presence of a network adapter is **NOT** a requirement.
+
+See `server context`_ in the public documentation for more information.
+
+
+Setting a hostname
+~~~~~~~~~~~~~~~~~~
+
+By default the name of the server will be applied as a hostname on the first boot.
+
+
+Providing user-data
+~~~~~~~~~~~~~~~~~~~
+
+You can provide user-data to the VM using the dedicated `meta field`_ ``cloudinit-user-data``
+in the `server context`_. By default the *cloud-config* format is expected there, and the
+``#cloud-config`` header may be omitted. However, since this is a raw-text field, you can
+provide any of the valid `config formats`_.
+
+If your user-data needs an internet connection, create a `meta field`_ named ``cloudinit-dsmode``
+in the `server context`_ and set its value to "net". If this field does not exist, the default value is "local".
+
+
+
+.. _CloudSigma: http://cloudsigma.com/
+.. _server context: http://cloudsigma-docs.readthedocs.org/en/latest/server_context.html
+.. _meta field: http://cloudsigma-docs.readthedocs.org/en/latest/meta.html
+.. _config formats: http://cloudinit.readthedocs.org/en/latest/topics/format.html
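
An illustrative 'meta' section of the server context matching the README above (field names as documented, values invented), together with the dsmode fallback the datasource applies:

    VALID_DSMODES = ("local", "net", "disabled")
    meta = {
        "ssh_public_key": "ssh-rsa AAAA... user@host",
        "cloudinit-user-data": "#cloud-config\nhostname: example\n",
        "cloudinit-dsmode": "net",   # omit this field to keep the default
    }
    dsmode = meta.get("cloudinit-dsmode")
    if dsmode not in VALID_DSMODES:
        dsmode = "local"             # default when the field is absent or invalid
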
diff --git a/packages/redhat/cloud-init.spec.in b/packages/redhat/cloud-init.spec.in
index 9614e2f1..0e9862d8 100644
--- a/packages/redhat/cloud-init.spec.in
+++ b/packages/redhat/cloud-init.spec.in
@@ -34,7 +34,7 @@ Requires: e2fsprogs
Requires: net-tools
Requires: procps
Requires: shadow-utils
-Requires: sudo
+Requires: sudo >= 1.7.2p2-3
# Install pypi 'dynamic' requirements
#for $r in $requires
@@ -169,7 +169,7 @@ fi
/usr/lib/%{name}/write-ssh-key-fingerprints
# Docs
-%doc TODO LICENSE ChangeLog Requires
+%doc TODO LICENSE ChangeLog requirements.txt
%doc %{_defaultdocdir}/cloud-init/*
# Configs
@@ -180,6 +180,7 @@ fi
%dir %{_sysconfdir}/cloud/templates
%config(noreplace) %{_sysconfdir}/cloud/templates/*
%config(noreplace) %{_sysconfdir}/rsyslog.d/21-cloudinit.conf
+%config(noreplace) %{_sysconfdir}/sudoers.d/cloud-init
# Python code is here...
%{python_sitelib}/*
diff --git a/packages/suse/cloud-init.spec.in b/packages/suse/cloud-init.spec.in
index c30a6fae..53e6ad13 100644
--- a/packages/suse/cloud-init.spec.in
+++ b/packages/suse/cloud-init.spec.in
@@ -107,7 +107,7 @@ rm -r %{buildroot}/%{python_sitelib}/tests
# Move documentation
mkdir -p %{buildroot}/%{_defaultdocdir}
mv %{buildroot}/usr/share/doc/cloud-init %{buildroot}/%{_defaultdocdir}
-for doc in TODO LICENSE ChangeLog Requires ; do
+for doc in TODO LICENSE ChangeLog requirements.txt; do
cp \${doc} %{buildroot}/%{_defaultdocdir}/cloud-init
done
diff --git a/requirements.txt b/requirements.txt
index 8f695c68..fdcbd143 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -10,8 +10,8 @@ PrettyTable
# datasource is removed, this is no longer needed
oauth
-# This one is currently used only by the SmartOS datasource. If that
-# datasource is removed, this is no longer needed
+# This one is currently used only by the CloudSigma and SmartOS datasources.
+# If these datasources are removed, this is no longer needed
pyserial
# This is only needed for places where we need to support configs in a manner
diff --git a/sysvinit/gentoo/cloud-config b/sysvinit/gentoo/cloud-config
new file mode 100644
index 00000000..b0fa786d
--- /dev/null
+++ b/sysvinit/gentoo/cloud-config
@@ -0,0 +1,13 @@
+#!/sbin/runscript
+
+depend() {
+ after cloud-init-local
+ after cloud-init
+ before cloud-final
+ provide cloud-config
+}
+
+start() {
+ cloud-init modules --mode config
+ eend 0
+}
diff --git a/sysvinit/gentoo/cloud-final b/sysvinit/gentoo/cloud-final
new file mode 100644
index 00000000..b457a354
--- /dev/null
+++ b/sysvinit/gentoo/cloud-final
@@ -0,0 +1,11 @@
+#!/sbin/runscript
+
+depend() {
+ after cloud-config
+ provide cloud-final
+}
+
+start() {
+ cloud-init modules --mode final
+ eend 0
+}
diff --git a/sysvinit/gentoo/cloud-init b/sysvinit/gentoo/cloud-init
new file mode 100644
index 00000000..9ab64ad8
--- /dev/null
+++ b/sysvinit/gentoo/cloud-init
@@ -0,0 +1,12 @@
+#!/sbin/runscript
+# add depends for network, dns, fs etc
+depend() {
+ after cloud-init-local
+ before cloud-config
+ provide cloud-init
+}
+
+start() {
+ cloud-init init
+ eend 0
+}
diff --git a/sysvinit/gentoo/cloud-init-local b/sysvinit/gentoo/cloud-init-local
new file mode 100644
index 00000000..9d47263e
--- /dev/null
+++ b/sysvinit/gentoo/cloud-init-local
@@ -0,0 +1,13 @@
+#!/sbin/runscript
+
+depend() {
+ after localmount
+ after netmount
+ before cloud-init
+ provide cloud-init-local
+}
+
+start() {
+ cloud-init init --local
+ eend 0
+}
diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py
new file mode 100644
index 00000000..7d59222b
--- /dev/null
+++ b/tests/unittests/test_cs_util.py
@@ -0,0 +1,65 @@
+from mocker import MockerTestCase
+
+from cloudinit.cs_utils import Cepko
+
+
+SERVER_CONTEXT = {
+ "cpu": 1000,
+ "cpus_instead_of_cores": False,
+ "global_context": {"some_global_key": "some_global_val"},
+ "mem": 1073741824,
+ "meta": {"ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe"},
+ "name": "test_server",
+ "requirements": [],
+ "smp": 1,
+ "tags": ["much server", "very performance"],
+ "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e889",
+ "vnc_password": "9e84d6cb49e46379"
+}
+
+
+class CepkoMock(Cepko):
+ def all(self):
+ return SERVER_CONTEXT
+
+ def get(self, key="", request_pattern=None):
+ return SERVER_CONTEXT['tags']
+
+
+class CepkoResultTests(MockerTestCase):
+ def setUp(self):
+ self.mocked = self.mocker.replace("cloudinit.cs_utils.Cepko",
+ spec=CepkoMock,
+ count=False,
+ passthrough=False)
+ self.mocked()
+ self.mocker.result(CepkoMock())
+ self.mocker.replay()
+ self.c = Cepko()
+
+ def test_getitem(self):
+ result = self.c.all()
+ self.assertEqual("65b2fb23-8c03-4187-a3ba-8b7c919e889", result['uuid'])
+ self.assertEqual([], result['requirements'])
+ self.assertEqual("much server", result['tags'][0])
+ self.assertEqual(1, result['smp'])
+
+ def test_len(self):
+ self.assertEqual(len(SERVER_CONTEXT), len(self.c.all()))
+
+ def test_contains(self):
+ result = self.c.all()
+ self.assertTrue('uuid' in result)
+ self.assertFalse('uid' in result)
+ self.assertTrue('meta' in result)
+ self.assertFalse('ssh_public_key' in result)
+
+ def test_iter(self):
+ self.assertEqual(sorted(SERVER_CONTEXT.keys()),
+ sorted([key for key in self.c.all()]))
+
+ def test_with_list_as_result(self):
+ result = self.c.get('tags')
+ self.assertEqual('much server', result[0])
+ self.assertTrue('very performance' in result)
+ self.assertEqual(2, len(result))
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index aad84206..44c537f4 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -1,4 +1,5 @@
from cloudinit import helpers
+from cloudinit.util import load_file
from cloudinit.sources import DataSourceAzure
from tests.unittests.helpers import populate_dir
@@ -6,6 +7,7 @@ import base64
import crypt
from mocker import MockerTestCase
import os
+import stat
import yaml
@@ -72,6 +74,7 @@ class TestAzureDataSource(MockerTestCase):
# patch cloud_dir, so our 'seed_dir' is guaranteed empty
self.paths = helpers.Paths({'cloud_dir': self.tmp})
+ self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
self.unapply = []
super(TestAzureDataSource, self).setUp()
@@ -92,13 +95,6 @@ class TestAzureDataSource(MockerTestCase):
def _invoke_agent(cmd):
data['agent_invoked'] = cmd
- def _write_files(datadir, files, dirmode):
- data['files'] = {}
- data['datadir'] = datadir
- data['datadir_mode'] = dirmode
- for (fname, content) in files.items():
- data['files'][fname] = content
-
def _wait_for_files(flist, _maxwait=None, _naplen=None):
data['waited'] = flist
return []
@@ -119,11 +115,11 @@ class TestAzureDataSource(MockerTestCase):
{'ovf-env.xml': data['ovfcontent']})
mod = DataSourceAzure
+ mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
self.apply_patches([(mod, 'list_possible_azure_ds_devs', dsdevs)])
self.apply_patches([(mod, 'invoke_agent', _invoke_agent),
- (mod, 'write_files', _write_files),
(mod, 'wait_for_files', _wait_for_files),
(mod, 'pubkeys_from_crt_files',
_pubkeys_from_crt_files),
@@ -147,10 +143,18 @@ class TestAzureDataSource(MockerTestCase):
self.assertTrue(ret)
self.assertEqual(dsrc.userdata_raw, "")
self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName'])
- self.assertTrue('ovf-env.xml' in data['files'])
- self.assertEqual(0700, data['datadir_mode'])
+ self.assertTrue(os.path.isfile(
+ os.path.join(self.waagent_d, 'ovf-env.xml')))
self.assertEqual(dsrc.metadata['instance-id'], 'i-my-azure-id')
+ def test_waagent_d_has_0700_perms(self):
+ # we expect /var/lib/waagent to be created 0700
+ dsrc = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertTrue(os.path.isdir(self.waagent_d))
+ self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0700)
+
def test_user_cfg_set_agent_command_plain(self):
# set dscfg in via plaintext
# we must have friendly-to-xml formatted plaintext in yaml_cfg
@@ -338,6 +342,65 @@ class TestAzureDataSource(MockerTestCase):
self.assertEqual(userdata, dsrc.userdata_raw)
+ def test_ovf_env_arrives_in_waagent_dir(self):
+ xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
+ dsrc = self._get_ds({'ovfcontent': xml})
+ dsrc.get_data()
+
+ # 'data_dir' is '/var/lib/waagent' (walinux-agent's state dir)
+ # we expect that the ovf-env.xml file is copied there.
+ ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
+ self.assertTrue(os.path.exists(ovf_env_path))
+ self.assertEqual(xml, load_file(ovf_env_path))
+
+ def test_existing_ovf_same(self):
+ # waagent/SharedConfig left alone if found ovf-env.xml same as cached
+ odata = {'UserData': base64.b64encode("SOMEUSERDATA")}
+ data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+ populate_dir(self.waagent_d,
+ {'ovf-env.xml': data['ovfcontent'],
+ 'otherfile': 'otherfile-content',
+ 'SharedConfig.xml': 'mysharedconfig'})
+
+ dsrc = self._get_ds(data)
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertTrue(os.path.exists(
+ os.path.join(self.waagent_d, 'ovf-env.xml')))
+ self.assertTrue(os.path.exists(
+ os.path.join(self.waagent_d, 'otherfile')))
+ self.assertTrue(os.path.exists(
+ os.path.join(self.waagent_d, 'SharedConfig.xml')))
+
+ def test_existing_ovf_diff(self):
+ # waagent/SharedConfig must be removed if ovfenv is found elsewhere
+
+ # 'get_data' should remove SharedConfig.xml in /var/lib/waagent
+ # if ovf-env.xml differs.
+ cached_ovfenv = construct_valid_ovf_env(
+ {'userdata': base64.b64encode("FOO_USERDATA")})
+ new_ovfenv = construct_valid_ovf_env(
+ {'userdata': base64.b64encode("NEW_USERDATA")})
+
+ populate_dir(self.waagent_d,
+ {'ovf-env.xml': cached_ovfenv,
+ 'SharedConfig.xml': "mysharedconfigxml",
+ 'otherfile': 'otherfilecontent'})
+
+ dsrc = self._get_ds({'ovfcontent': new_ovfenv})
+ ret = dsrc.get_data()
+ self.assertTrue(ret)
+ self.assertEqual(dsrc.userdata_raw, "NEW_USERDATA")
+ self.assertTrue(os.path.exists(
+ os.path.join(self.waagent_d, 'otherfile')))
+ self.assertFalse(
+ os.path.exists(os.path.join(self.waagent_d, 'SharedConfig.xml')))
+ self.assertTrue(
+ os.path.exists(os.path.join(self.waagent_d, 'ovf-env.xml')))
+ self.assertEqual(new_ovfenv,
+ load_file(os.path.join(self.waagent_d, 'ovf-env.xml')))
+
class TestReadAzureOvf(MockerTestCase):
def test_invalid_xml_raises_non_azure_ds(self):
diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py
new file mode 100644
index 00000000..3245aba1
--- /dev/null
+++ b/tests/unittests/test_datasource/test_cloudsigma.py
@@ -0,0 +1,59 @@
+# coding: utf-8
+from unittest import TestCase
+
+from cloudinit.cs_utils import Cepko
+from cloudinit.sources import DataSourceCloudSigma
+
+
+SERVER_CONTEXT = {
+ "cpu": 1000,
+ "cpus_instead_of_cores": False,
+ "global_context": {"some_global_key": "some_global_val"},
+ "mem": 1073741824,
+ "meta": {
+ "ssh_public_key": "ssh-rsa AAAAB3NzaC1yc2E.../hQ5D5 john@doe",
+ "cloudinit-user-data": "#cloud-config\n\n...",
+ },
+ "name": "test_server",
+ "requirements": [],
+ "smp": 1,
+ "tags": ["much server", "very performance"],
+ "uuid": "65b2fb23-8c03-4187-a3ba-8b7c919e8890",
+ "vnc_password": "9e84d6cb49e46379"
+}
+
+
+class CepkoMock(Cepko):
+ result = SERVER_CONTEXT
+
+ def all(self):
+ return self
+
+
+class DataSourceCloudSigmaTest(TestCase):
+ def setUp(self):
+ self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "")
+ self.datasource.cepko = CepkoMock()
+ self.datasource.get_data()
+
+ def test_get_hostname(self):
+ self.assertEqual("test_server", self.datasource.get_hostname())
+ self.datasource.metadata['name'] = ''
+ self.assertEqual("65b2fb23", self.datasource.get_hostname())
+ self.datasource.metadata['name'] = u'ั‚ะตัั‚'
+ self.assertEqual("65b2fb23", self.datasource.get_hostname())
+
+ def test_get_public_ssh_keys(self):
+ self.assertEqual([SERVER_CONTEXT['meta']['ssh_public_key']],
+ self.datasource.get_public_ssh_keys())
+
+ def test_get_instance_id(self):
+ self.assertEqual(SERVER_CONTEXT['uuid'],
+ self.datasource.get_instance_id())
+
+ def test_metadata(self):
+ self.assertEqual(self.datasource.metadata, SERVER_CONTEXT)
+
+ def test_user_data(self):
+ self.assertEqual(self.datasource.userdata_raw,
+ SERVER_CONTEXT['meta']['cloudinit-user-data'])