From b7632baa817a8425c2dcab4a01e2f7f0983e5f9e Mon Sep 17 00:00:00 2001
From: Wayne Witzel III <wayne.witzel@canonical.com>
Date: Fri, 19 Dec 2014 11:28:35 -0500
Subject: add user-data encoding support for gce

---
 cloudinit/sources/DataSourceGCE.py | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 7091e3c1..e6f3651c 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -15,6 +15,8 @@
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 
+from base64 import b64decode
+
 from cloudinit import log as logging
 from cloudinit import util
 from cloudinit import sources
@@ -58,6 +60,7 @@ class DataSourceGCE(sources.DataSource):
             ('local-hostname', 'instance/hostname', True),
             ('public-keys', 'project/attributes/sshKeys', False),
             ('user-data', 'instance/attributes/user-data', False),
+            ('user-data-encoding', 'instance/attributes/user-data-encoding', False),
         ]
 
         # if we cannot resolve the metadata server, then no point in trying
@@ -101,6 +104,12 @@ class DataSourceGCE(sources.DataSource):
             lines = self.metadata['public-keys'].splitlines()
             self.metadata['public-keys'] = [self._trim_key(k) for k in lines]
 
+        if self.metadata.get('user-data-encoding'):
+            if self.metadata['user-data-encoding'] == 'base64':
+                self.metadata['user-data'] = b64decode(self.metadata['user-data'])
+            else:
+                LOG.warn('user-data-encoding: unknown encoding specified', None, None)
+
         return found
 
     @property
-- 
cgit v1.2.3


From c8ab70b9413e3d8d0f240d968023258b8a0970e6 Mon Sep 17 00:00:00 2001
From: Wayne Witzel III <wayne.witzel@canonical.com>
Date: Tue, 6 Jan 2015 11:41:05 -0500
Subject: Corrected errant logging message.

---
 cloudinit/sources/DataSourceGCE.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index e6f3651c..92e5a28e 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -29,7 +29,6 @@ BUILTIN_DS_CONFIG = {
 }
 REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
 
-
 class DataSourceGCE(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -104,11 +103,12 @@ class DataSourceGCE(sources.DataSource):
             lines = self.metadata['public-keys'].splitlines()
             self.metadata['public-keys'] = [self._trim_key(k) for k in lines]
 
-        if self.metadata.get('user-data-encoding'):
-            if self.metadata['user-data-encoding'] == 'base64':
+        encoding = self.metadata.get('user-data-encoding')
+        if encoding:
+            if encoding == 'base64':
                 self.metadata['user-data'] = b64decode(self.metadata['user-data'])
             else:
-                LOG.warn('user-data-encoding: unknown encoding specified', None, None)
+                LOG.warn('unknown user-data-encoding: %s, ignoring', encoding)
 
         return found
 
-- 
cgit v1.2.3


From 38c851e58e09c5574661ef4b2d2e66f6e38063d1 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 6 Jan 2015 12:02:38 -0500
Subject: tools/run-pep8: remove leading ',' fed to --ignore

--ignore was being called with ',E121,E...' rather than
'E121,E...'.

that resulted in odd behavior, missing the pep8 errors that are fixed
here.
---
 cloudinit/distros/freebsd.py                              |  3 ++-
 cloudinit/distros/net_util.py                             |  1 -
 cloudinit/distros/rhel.py                                 |  2 +-
 cloudinit/sources/DataSourceConfigDrive.py                |  4 ++--
 cloudinit/sources/DataSourceDigitalOcean.py               | 11 ++++++-----
 cloudinit/util.py                                         |  2 +-
 doc/rtd/conf.py                                           |  6 +++---
 setup.py                                                  |  2 +-
 tests/unittests/test_datasource/test_digitalocean.py      |  1 +
 tests/unittests/test_datasource/test_gce.py               |  3 ++-
 tests/unittests/test_datasource/test_smartos.py           |  4 ++--
 tests/unittests/test_distros/test_netconfig.py            |  2 --
 tests/unittests/test_handler/test_handler_yum_add_repo.py |  2 +-
 tests/unittests/test_templating.py                        |  3 ++-
 tools/run-pep8                                            |  1 +
 15 files changed, 25 insertions(+), 22 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index ee23fd20..9216510e 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -119,7 +119,8 @@ class Distro(distros.Distro):
         index = n.group(0)
 
         (out, err) = util.subp(['ifconfig', '-a'])
-        ifconfigoutput = [x for x in (out.strip()).splitlines() if len(x.split()) > 0]
+        ifconfigoutput = [x for x in (out.strip()).splitlines()
+                          if len(x.split()) > 0]
         for line in ifconfigoutput:
             m = re.match('^\w+', line)
             if m:
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
index dd63a6a3..8b28e2d1 100644
--- a/cloudinit/distros/net_util.py
+++ b/cloudinit/distros/net_util.py
@@ -180,4 +180,3 @@ def translate_network(settings):
         if cmd == 'iface' and 'inet6' in args:
             real_ifaces[dev_name]['inet6'] = True
     return real_ifaces
-
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index d5cc15fe..d9588632 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -91,7 +91,7 @@ class Distro(distros.Distro):
                     'IPV6INIT': _make_sysconfig_bool(True),
                     'IPV6ADDR': info.get('ipv6').get('address'),
                     'IPV6_DEFAULTGW': info.get('ipv6').get('gateway'),
-            })
+                })
             rhel_util.update_sysconfig_file(net_fn, net_cfg)
             if 'dns-nameservers' in info:
                 nameservers.extend(info['dns-nameservers'])
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 27658073..15244a0d 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -79,8 +79,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
                     else:
                         mtype = None
                         sync = True
-                    results = util.mount_cb(dev, read_config_drive, mtype=mtype,
-                                            sync=sync)
+                    results = util.mount_cb(dev, read_config_drive,
+                                            mtype=mtype, sync=sync)
                     found = dev
                 except openstack.NonReadable:
                     pass
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 069bdb41..8f27ee89 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -31,6 +31,7 @@ BUILTIN_DS_CONFIG = {
 MD_RETRIES = 0
 MD_TIMEOUT = 1
 
+
 class DataSourceDigitalOcean(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -51,10 +52,10 @@ class DataSourceDigitalOcean(sources.DataSource):
             self.timeout = MD_TIMEOUT
 
     def get_data(self):
-        caller = functools.partial(util.read_file_or_url, timeout=self.timeout, 
-                                   retries=self.retries)
+        caller = functools.partial(util.read_file_or_url,
+                                   timeout=self.timeout, retries=self.retries)
         md = ec2_utils.MetadataMaterializer(str(caller(self.metadata_address)),
-                                            base_url=self.metadata_address, 
+                                            base_url=self.metadata_address,
                                             caller=caller)
 
         self.metadata = md.materialize()
@@ -72,9 +73,9 @@ class DataSourceDigitalOcean(sources.DataSource):
 
     def get_public_ssh_keys(self):
         if type(self.metadata['public-keys']) is StringType:
-           return [self.metadata['public-keys']]
+            return [self.metadata['public-keys']]
         else:
-           return self.metadata['public-keys']
+            return self.metadata['public-keys']
 
     @property
     def availability_zone(self):
diff --git a/cloudinit/util.py b/cloudinit/util.py
index ee5e5c0a..bf8e7d80 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1966,7 +1966,7 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
 def read_meminfo(meminfo="/proc/meminfo", raw=False):
     # read a /proc/meminfo style file and return
     # a dict with 'total', 'free', and 'available'
-    mpliers = {'kB': 2**10, 'mB': 2 ** 20, 'B': 1, 'gB': 2 ** 30}
+    mpliers = {'kB': 2 ** 10, 'mB': 2 ** 20, 'B': 1, 'gB': 2 ** 30}
     kmap = {'MemTotal:': 'total', 'MemFree:': 'free',
             'MemAvailable:': 'available'}
     ret = {}
diff --git a/doc/rtd/conf.py b/doc/rtd/conf.py
index 9be02766..d3764bea 100644
--- a/doc/rtd/conf.py
+++ b/doc/rtd/conf.py
@@ -12,8 +12,8 @@ sys.path.insert(0, os.path.abspath('.'))
 from cloudinit import version
 
 # Supress warnings for docs that aren't used yet
-#unused_docs = [
-#]
+# unused_docs = [
+# ]
 
 # General information about the project.
 project = 'Cloud-Init'
@@ -21,7 +21,7 @@ project = 'Cloud-Init'
 # -- General configuration ----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
diff --git a/setup.py b/setup.py
index ab72ec18..25f09e58 100755
--- a/setup.py
+++ b/setup.py
@@ -164,7 +164,7 @@ else:
         (USR + '/share/doc/cloud-init', [f for f in glob('doc/*') if is_f(f)]),
         (USR + '/share/doc/cloud-init/examples',
             [f for f in glob('doc/examples/*') if is_f(f)]),
-        (USR + '/share/doc/cloud-init/examples/seed', 
+        (USR + '/share/doc/cloud-init/examples/seed',
             [f for f in glob('doc/examples/seed/*') if is_f(f)]),
     ]
     # Use a subclass for install that handles
diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py
index 04bee340..d1270fc2 100644
--- a/tests/unittests/test_datasource/test_digitalocean.py
+++ b/tests/unittests/test_datasource/test_digitalocean.py
@@ -51,6 +51,7 @@ DO_META = {
 
 MD_URL_RE = re.compile(r'http://169.254.169.254/metadata/v1/.*')
 
+
 def _request_callback(method, uri, headers):
     url_path = urlparse(uri).path
     if url_path.startswith('/metadata/v1/'):
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 60a0ce48..842a72ba 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -40,7 +40,8 @@ GCE_META_PARTIAL = {
 }
 
 HEADERS = {'X-Google-Metadata-Request': 'True'}
-MD_URL_RE = re.compile(r'http://metadata.google.internal./computeMetadata/v1/.*')
+MD_URL_RE = re.compile(
+    r'http://metadata.google.internal./computeMetadata/v1/.*')
 
 
 def _request_callback(method, uri, headers):
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index b197b600..65675106 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -342,8 +342,8 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         """
             User-data in the SmartOS world is supposed to be written to a file
             each and every boot. This tests to make sure that in the event the
-            legacy user-data is removed, the existing user-data is backed-up and
-            there is no /var/db/user-data left.
+            legacy user-data is removed, the existing user-data is backed-up
+            and there is no /var/db/user-data left.
         """
 
         user_data_f = "%s/mdata-user-data" % self.legacy_user_d
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index dbbf9617..193338e8 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -294,7 +294,6 @@ IPV6_AUTOCONF=no
         self.assertCfgEquals(expected_buf, str(write_buf))
         self.assertEquals(write_buf.mode, 0644)
 
-
     def test_simple_write_freebsd(self):
         fbsd_distro = self._get_distro('freebsd')
         util_mock = self.mocker.replace(util.write_file,
@@ -357,4 +356,3 @@ defaultrouter="192.168.1.254"
 '''
         self.assertCfgEquals(expected_buf, str(write_buf))
         self.assertEquals(write_buf.mode, 0644)
-
diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py
index 21b89c34..435c9787 100644
--- a/tests/unittests/test_handler/test_handler_yum_add_repo.py
+++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py
@@ -24,7 +24,7 @@ class TestConfig(helpers.FilesystemMockingTestCase):
                 'epel-testing': {
                     'name': 'Extra Packages for Enterprise Linux 5 - Testing',
                     # Missing this should cause the repo not to be written
-                    # 'baseurl': 'http://blah.org/pub/epel/testing/5/$basearch',
+                    # 'baseurl': 'http://blah.org/pub/epel/testing/5/$barch',
                     'enabled': False,
                     'gpgcheck': True,
                     'gpgkey': 'file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL',
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index 87681f0f..3ba4ed8a 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -100,7 +100,8 @@ $a,$b'''
         mirror = "mymirror"
         codename = "zany"
         in_data = "deb $mirror $codename-updates main contrib non-free"
-        ex_data = "deb %s %s-updates main contrib non-free" % (mirror, codename)
+        ex_data = "deb %s %s-updates main contrib non-free" % (mirror,
+                                                               codename)
 
         out_data = templater.basic_render(in_data,
             {'mirror': mirror, 'codename': codename})
diff --git a/tools/run-pep8 b/tools/run-pep8
index d0a131f6..ccd6be5a 100755
--- a/tools/run-pep8
+++ b/tools/run-pep8
@@ -24,6 +24,7 @@ IGNORE="$IGNORE,E126" # Continuation line over-indented for hanging indent
 IGNORE="$IGNORE,E127" # Continuation line over-indented for visual indent
 IGNORE="$IGNORE,E128" # Continuation line under-indented for visual indent
 IGNORE="$IGNORE,E502" # The backslash is redundant between brackets
+IGNORE="${IGNORE#,}"  # remove the leading ',' added above
 
 cmd=(
     ${base}/hacking.py
-- 
cgit v1.2.3


From 29ab69a1e550ba52c436a6c605aa691d1f20623c Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 13 Jan 2015 11:34:49 +0000
Subject: Fix lsblk output option.

---
 cloudinit/config/cc_disk_setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 1660832b..6c5047a7 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -151,7 +151,7 @@ def enumerate_disk(device, nodeps=False):
         name: the device name, i.e. sda
     """
 
-    lsblk_cmd = [LSBLK_CMD, '--pairs', '--out', 'NAME,TYPE,FSTYPE,LABEL',
+    lsblk_cmd = [LSBLK_CMD, '--pairs', '--output', 'NAME,TYPE,FSTYPE,LABEL',
                  device]
 
     if nodeps:
-- 
cgit v1.2.3


From 1a56b32de0d2954c172e2de2c756e08471e47b6e Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 13 Jan 2015 11:34:49 +0000
Subject: Implement check_partition_gpt_layout.

This includes moving some shared logic into check_partition_layout.
---
 cloudinit/config/cc_disk_setup.py | 42 ++++++++++++++++++++++++++++-----------
 1 file changed, 30 insertions(+), 12 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 6c5047a7..8334657e 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -393,6 +393,36 @@ def check_partition_mbr_layout(device, layout):
                     break
 
             found_layout.append(type_label)
+    return found_layout
+
+
+def check_partition_gpt_layout(device, layout):
+    prt_cmd = ['sgdisk', '-p', device]
+    try:
+        out, _err = util.subp(prt_cmd)
+    except Exception as e:
+        raise Exception("Error running partition command on %s\n%s" % (
+                        device, e))
+
+    out_lines = iter(out.splitlines())
+    # Skip header
+    for line in out_lines:
+        if line.strip().startswith('Number'):
+            break
+
+    return [line.strip().split()[-1] for line in out_lines]
+
+
+def check_partition_layout(table_type, device, layout):
+    """
+    See if the partition lay out matches.
+
+    This is a future proofing function. In order
+    to add support for other disk layout schemes, add a
+    function called check_partition_%s_layout
+    """
+    found_layout = get_dyn_func(
+        "check_partition_%s_layout", table_type, device, layout)
 
     if isinstance(layout, bool):
         # if we are using auto partitioning, or "True" be happy
@@ -417,18 +447,6 @@ def check_partition_mbr_layout(device, layout):
     return False
 
 
-def check_partition_layout(table_type, device, layout):
-    """
-    See if the partition lay out matches.
-
-    This is future a future proofing function. In order
-    to add support for other disk layout schemes, add a
-    function called check_partition_%s_layout
-    """
-    return get_dyn_func("check_partition_%s_layout", table_type, device,
-                        layout)
-
-
 def get_partition_mbr_layout(size, layout):
     """
     Calculate the layout of the partition table. Partition sizes
-- 
cgit v1.2.3


From ed6219dbb5dc2fada4b9b86e7c9b94d2d35dcb7f Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 13 Jan 2015 11:34:49 +0000
Subject: Initial run at GPT disk handling.

---
 cloudinit/config/cc_disk_setup.py | 42 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 41 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 8334657e..dc607533 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -27,6 +27,7 @@ frequency = PER_INSTANCE
 # Define the commands to use
 UDEVADM_CMD = util.which('udevadm')
 SFDISK_CMD = util.which("sfdisk")
+SGDISK_CMD = util.which("sgdisk")
 LSBLK_CMD = util.which("lsblk")
 BLKID_CMD = util.which("blkid")
 BLKDEV_CMD = util.which("blockdev")
@@ -397,7 +398,7 @@ def check_partition_mbr_layout(device, layout):
 
 
 def check_partition_gpt_layout(device, layout):
-    prt_cmd = ['sgdisk', '-p', device]
+    prt_cmd = [SGDISK_CMD, '-p', device]
     try:
         out, _err = util.subp(prt_cmd)
     except Exception as e:
@@ -499,6 +500,29 @@ def get_partition_mbr_layout(size, layout):
     return sfdisk_definition
 
 
+def get_partition_gpt_layout(size, layout):
+    if isinstance(layout, bool):
+        return [(None, [0, 0])]
+
+    partition_specs = []
+    for partition in layout:
+        if isinstance(partition, list):
+            if len(partition) != 2:
+                raise Exception(
+                    "Partition was incorrectly defined: %s" % partition)
+            percent, partition_type = partition
+        else:
+            percent = partition
+            partition_type = None
+
+        part_size = int(float(size) * (float(percent) / 100))
+        partition_specs.append((partition_type, [0, '+{}'.format(part_size)]))
+
+    # The last partition should use up all remaining space
+    partition_specs[-1][-1][-1] = 0
+    return partition_specs
+
+
 def purge_disk_ptable(device):
     # wipe the first and last megabyte of a disk (or file)
     # gpt stores partition table both at front and at end.
@@ -574,6 +598,22 @@ def exec_mkpart_mbr(device, layout):
     read_parttbl(device)
 
 
+def exec_mkpart_gpt(device, layout):
+    try:
+        util.subp([SGDISK_CMD, '-Z', device])
+        for index, (partition_type, (start, end)) in enumerate(layout):
+            index += 1
+            util.subp([SGDISK_CMD,
+                       '-n', '{}:{}:{}'.format(index, start, end), device])
+            if partition_type is not None:
+                util.subp(
+                    [SGDISK_CMD,
+                     '-t', '{}:{}'.format(index, partition_type), device])
+    except Exception:
+        print "Failed to partition device %s" % (device,)
+        raise
+
+
 def exec_mkpart(table_type, device, layout):
     """
     Fetches the function for creating the table type.
-- 
cgit v1.2.3


From 54d1968f026cb0ee79913b599c2c90d9f07ef35d Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 13 Jan 2015 11:34:49 +0000
Subject: Find disk size differently for GPT.

MBR uses block sizes, which is what the current (apparently portable) code was
producing.  GPT uses sectors to determine partition size.
---
 cloudinit/config/cc_disk_setup.py | 42 +++++++++++++++++++++++----------------
 1 file changed, 25 insertions(+), 17 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index dc607533..d8553167 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -316,22 +316,6 @@ def is_disk_used(device):
     return False
 
 
-def get_hdd_size(device):
-    """
-    Returns the hard disk size.
-    This works with any disk type, including GPT.
-    """
-
-    size_cmd = [SFDISK_CMD, '--show-size', device]
-    size = None
-    try:
-        size, _err = util.subp(size_cmd)
-    except Exception as e:
-        raise Exception("Failed to get %s size\n%s" % (device, e))
-
-    return int(size.strip())
-
-
 def get_dyn_func(*args):
     """
     Call the appropriate function.
@@ -359,6 +343,30 @@ def get_dyn_func(*args):
         raise Exception("No such function %s to call!" % func_name)
 
 
+def get_mbr_hdd_size(device):
+    size_cmd = [SFDISK_CMD, '--show-size', device]
+    size = None
+    try:
+        size, _err = util.subp(size_cmd)
+    except Exception as e:
+        raise Exception("Failed to get %s size\n%s" % (device, e))
+
+    return int(size.strip())
+
+
+def get_gpt_hdd_size(device):
+    out, _ = util.subp([SGDISK_CMD, '-p', device])
+    return out.splitlines()[0].split()[2]
+
+
+def get_hdd_size(table_type, device):
+    """
+    Returns the hard disk size.
+    This works with any disk type, including GPT.
+    """
+    return get_dyn_func("get_%s_hdd_size", table_type, device)
+
+
 def check_partition_mbr_layout(device, layout):
     """
     Returns true if the partition layout matches the one on the disk
@@ -676,7 +684,7 @@ def mkpart(device, definition):
         return
 
     LOG.debug("Checking for device size")
-    device_size = get_hdd_size(device)
+    device_size = get_hdd_size(table_type, device)
 
     LOG.debug("Calculating partition layout")
     part_definition = get_partition_layout(table_type, device_size, layout)
-- 
cgit v1.2.3


From bd0c29c2f298ad060bba88ddbb4d2d11ab07cafe Mon Sep 17 00:00:00 2001
From: Ben Howard <ben.howard@canonical.com>
Date: Wed, 14 Jan 2015 07:29:57 -0700
Subject: Use the short name for GCE hostnames per GCE's request (LP:
 #1383794).

---
 cloudinit/sources/DataSourceGCE.py          | 3 ++-
 tests/unittests/test_datasource/test_gce.py | 7 ++++---
 2 files changed, 6 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 92e5a28e..f877b3d1 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -124,7 +124,8 @@ class DataSourceGCE(sources.DataSource):
         return self.metadata['public-keys']
 
     def get_hostname(self, fqdn=False, _resolve_ip=False):
-        return self.metadata['local-hostname']
+        # GCE has long FQDN's and has asked for short hostnames
+        return self.metadata['local-hostname'].split('.')[0]
 
     def get_userdata_raw(self):
         return self.metadata['user-data']
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 89251b0e..06050bb1 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -99,7 +99,8 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
             body=_new_request_callback())
         self.ds.get_data()
 
-        self.assertEqual(GCE_META.get('instance/hostname'),
+        shostname = GCE_META.get('instance/hostname').split('.')[0]
+        self.assertEqual(shostname,
                          self.ds.get_hostname())
 
         self.assertEqual(GCE_META.get('instance/id'),
@@ -126,8 +127,8 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
         self.assertEqual(GCE_META_PARTIAL.get('instance/id'),
                          self.ds.get_instance_id())
 
-        self.assertEqual(GCE_META_PARTIAL.get('instance/hostname'),
-                         self.ds.get_hostname())
+        shostname = GCE_META_PARTIAL.get('instance/hostname').split('.')[0]
+        self.assertEqual(shostname, self.ds.get_hostname())
 
     @httpretty.activate
     def test_metadata_encoding(self):
-- 
cgit v1.2.3


From 28c8aa7270a04adea69065477b13cfc0dd244acc Mon Sep 17 00:00:00 2001
From: Ben Howard <ben.howard@canonical.com>
Date: Wed, 14 Jan 2015 12:24:09 -0700
Subject: Drop reliance on dmidecode executable.

---
 cloudinit/sources/DataSourceAltCloud.py          | 27 +++-------
 cloudinit/sources/DataSourceCloudSigma.py        | 22 ++++----
 cloudinit/sources/DataSourceSmartOS.py           | 25 +++------
 cloudinit/util.py                                | 28 ++++++++++
 tests/unittests/test_datasource/test_altcloud.py | 66 ++++++++++--------------
 tests/unittests/test_util.py                     | 28 ++++++++++
 6 files changed, 106 insertions(+), 90 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 1e913a6e..1b0f72a1 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -40,7 +40,6 @@ LOG = logging.getLogger(__name__)
 CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
 
 # Shell command lists
-CMD_DMI_SYSTEM = ['/usr/sbin/dmidecode', '--string', 'system-product-name']
 CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
 CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
 
@@ -100,11 +99,7 @@ class DataSourceAltCloud(sources.DataSource):
         '''
         Description:
             Get the type for the cloud back end this instance is running on
-            by examining the string returned by:
-            dmidecode --string system-product-name
-
-            On VMWare/vSphere dmidecode returns: RHEV Hypervisor
-            On VMWare/vSphere dmidecode returns: VMware Virtual Platform
+            by examining the string returned by reading the dmi data.
 
         Input:
             None
@@ -117,26 +112,20 @@ class DataSourceAltCloud(sources.DataSource):
 
         uname_arch = os.uname()[4]
         if uname_arch.startswith("arm") or uname_arch == "aarch64":
-            # Disabling because dmidecode in CMD_DMI_SYSTEM crashes kvm process
+            # Disabling because dmi data is not available on ARM processors
             LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)")
             return 'UNKNOWN'
 
-        cmd = CMD_DMI_SYSTEM
-        try:
-            (cmd_out, _err) = util.subp(cmd)
-        except ProcessExecutionError, _err:
-            LOG.debug(('Failed command: %s\n%s') % \
-                (' '.join(cmd), _err.message))
-            return 'UNKNOWN'
-        except OSError, _err:
-            LOG.debug(('Failed command: %s\n%s') % \
-                (' '.join(cmd), _err.message))
+        system_name = util.read_dmi_data("system-product-name")
+        if not system_name:
             return 'UNKNOWN'
 
-        if cmd_out.upper().startswith('RHEV'):
+        sys_name = system_name.upper()
+
+        if sys_name.startswith('RHEV'):
             return 'RHEV'
 
-        if cmd_out.upper().startswith('VMWARE'):
+        if sys_name.startswith('VMWARE'):
             return 'VSPHERE'
 
         return 'UNKNOWN'
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 707cd0ce..76597116 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -44,27 +44,25 @@ class DataSourceCloudSigma(sources.DataSource):
 
     def is_running_in_cloudsigma(self):
         """
-        Uses dmidecode to detect if this instance of cloud-init is running
+        Uses dmi data to detect if this instance of cloud-init is running
         in the CloudSigma's infrastructure.
         """
         uname_arch = os.uname()[4]
         if uname_arch.startswith("arm") or uname_arch == "aarch64":
-            # Disabling because dmidecode in CMD_DMI_SYSTEM crashes kvm process
+            # Disabling because dmi data is not available on ARM processors
             LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
             return False
 
-        dmidecode_path = util.which('dmidecode')
-        if not dmidecode_path:
+        LOG.debug("determining hypervisor product name via dmi data")
+        sys_product_name = util.read_dmi_data("system-product-name")
+        if not sys_product_name:
+            LOG.warn("failed to get hypervisor product name via dmi data")
             return False
+        else:
+            LOG.debug("detected hypervisor as {}".format(sys_product_name))
+            return 'cloudsigma' in sys_product_name.lower()
 
-        LOG.debug("Determining hypervisor product name via dmidecode")
-        try:
-            cmd = [dmidecode_path, "--string", "system-product-name"]
-            system_product_name, _ = util.subp(cmd)
-            return 'cloudsigma' in system_product_name.lower()
-        except:
-            LOG.warn("Failed to get hypervisor product name via dmidecode")
-
+        LOG.warn("failed to query dmi data for system product name")
         return False
 
     def get_data(self):
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 2733a2f6..86b8775a 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -358,26 +358,13 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
 
 
 def dmi_data():
-    sys_uuid, sys_type = None, None
-    dmidecode_path = util.which('dmidecode')
-    if not dmidecode_path:
-        return False
+    sys_uuid = util.read_dmi_data("system-uuid")
+    sys_type = util.read_dmi_data("system-product-name")
+
+    if not sys_uuid or not sys_type:
+        return None
 
-    sys_uuid_cmd = [dmidecode_path, "-s", "system-uuid"]
-    try:
-        LOG.debug("Getting hostname from dmidecode")
-        (sys_uuid, _err) = util.subp(sys_uuid_cmd)
-    except Exception as e:
-        util.logexc(LOG, "Failed to get system UUID", e)
-
-    sys_type_cmd = [dmidecode_path, "-s", "system-product-name"]
-    try:
-        LOG.debug("Determining hypervisor product name via dmidecode")
-        (sys_type, _err) = util.subp(sys_type_cmd)
-    except Exception as e:
-        util.logexc(LOG, "Failed to get system UUID", e)
-
-    return (sys_uuid.lower().strip(), sys_type.strip())
+    return (sys_uuid.lower(), sys_type)
 
 
 def write_boot_content(content, content_f, link=None, shebang=False,
diff --git a/cloudinit/util.py b/cloudinit/util.py
index bf8e7d80..f7498b01 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -72,6 +72,9 @@ FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
 # Helper utils to see if running in a container
 CONTAINER_TESTS = ['running-in-container', 'lxc-is-container']
 
+# Path for DMI Data
+DMI_SYS_PATH = "/sys/class/dmi/id"
+
 
 class ProcessExecutionError(IOError):
 
@@ -2011,3 +2014,28 @@ def human2bytes(size):
         raise ValueError("'%s': cannot be negative" % size_in)
 
     return int(num * mpliers[mplier])
+
+
+def read_dmi_data(key):
+    """
+    Reads dmi data from /sys/class/dmi/id
+    """
+
+    dmi_key = "{}/{}".format(DMI_SYS_PATH, key)
+    LOG.debug("querying dmi data {}".format(dmi_key))
+    try:
+        if not os.path.exists(dmi_key):
+            LOG.debug("did not find {}".format(dmi_key))
+            return None
+
+        key_data = load_file(dmi_key)
+        if not key_data:
+            LOG.debug("{} did not return any data".format(key))
+            return None
+
+        LOG.debug("dmi data {} returned {}".format(dmi_key, key_data))
+        return key_data.strip()
+
+    except Exception as e:
+        logexc(LOG, "failed read of {}".format(dmi_key), e)
+        return None
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
index eaaa90e6..1a48ee5f 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -26,6 +26,7 @@ import shutil
 import tempfile
 
 from cloudinit import helpers
+from cloudinit import util
 from unittest import TestCase
 
 # Get the cloudinit.sources.DataSourceAltCloud import items needed.
@@ -98,6 +99,16 @@ def _remove_user_data_files(mount_dir,
             pass
 
 
+def _dmi_data(expected):
+    '''
+    Spoof the data received over DMI
+    '''
+    def _data(key):
+        return expected
+
+    return _data
+
+
 class TestGetCloudType(TestCase):
     '''
     Test to exercise method: DataSourceAltCloud.get_cloud_type()
@@ -106,24 +117,22 @@ class TestGetCloudType(TestCase):
     def setUp(self):
         '''Set up.'''
         self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+        self.dmi_data = util.read_dmi_data
         # We have a different code path for arm to deal with LP1243287
         # We have to switch arch to x86_64 to avoid test failure
         force_arch('x86_64')
 
     def tearDown(self):
         # Reset
-        cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
-            ['dmidecode', '--string', 'system-product-name']
-        # Return back to original arch
+        util.read_dmi_data = self.dmi_data
         force_arch()
 
     def test_rhev(self):
         '''
         Test method get_cloud_type() for RHEVm systems.
-        Forcing dmidecode return to match a RHEVm system: RHEV Hypervisor
+        Forcing read_dmi_data return to match a RHEVm system: RHEV Hypervisor
         '''
-        cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
-            ['echo', 'RHEV Hypervisor']
+        util.read_dmi_data = _dmi_data('RHEV')
         dsrc = DataSourceAltCloud({}, None, self.paths)
         self.assertEquals('RHEV', \
             dsrc.get_cloud_type())
@@ -131,10 +140,9 @@ class TestGetCloudType(TestCase):
     def test_vsphere(self):
         '''
         Test method get_cloud_type() for vSphere systems.
-        Forcing dmidecode return to match a vSphere system: RHEV Hypervisor
+        Forcing read_dmi_data return to match a vSphere system: RHEV Hypervisor
         '''
-        cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
-            ['echo', 'VMware Virtual Platform']
+        util.read_dmi_data = _dmi_data('VMware Virtual Platform')
         dsrc = DataSourceAltCloud({}, None, self.paths)
         self.assertEquals('VSPHERE', \
             dsrc.get_cloud_type())
@@ -142,30 +150,9 @@ class TestGetCloudType(TestCase):
     def test_unknown(self):
         '''
         Test method get_cloud_type() for unknown systems.
-        Forcing dmidecode return to match an unrecognized return.
-        '''
-        cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
-            ['echo', 'Unrecognized Platform']
-        dsrc = DataSourceAltCloud({}, None, self.paths)
-        self.assertEquals('UNKNOWN', \
-            dsrc.get_cloud_type())
-
-    def test_exception1(self):
-        '''
-        Test method get_cloud_type() where command dmidecode fails.
-        '''
-        cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
-            ['ls', 'bad command']
-        dsrc = DataSourceAltCloud({}, None, self.paths)
-        self.assertEquals('UNKNOWN', \
-            dsrc.get_cloud_type())
-
-    def test_exception2(self):
-        '''
-        Test method get_cloud_type() where command dmidecode is not available.
+        Forcing read_dmi_data return to match an unrecognized return.
         '''
-        cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
-            ['bad command']
+        util.read_dmi_data = _dmi_data('Unrecognized Platform')
         dsrc = DataSourceAltCloud({}, None, self.paths)
         self.assertEquals('UNKNOWN', \
             dsrc.get_cloud_type())
@@ -180,6 +167,7 @@ class TestGetDataCloudInfoFile(TestCase):
         '''Set up.'''
         self.paths = helpers.Paths({'cloud_dir': '/tmp'})
         self.cloud_info_file = tempfile.mkstemp()[1]
+        self.dmi_data = util.read_dmi_data
         cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
             self.cloud_info_file
 
@@ -192,6 +180,7 @@ class TestGetDataCloudInfoFile(TestCase):
         except OSError:
             pass
 
+        util.read_dmi_data = self.dmi_data
         cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
             '/etc/sysconfig/cloud-info'
 
@@ -243,6 +232,7 @@ class TestGetDataNoCloudInfoFile(TestCase):
     def setUp(self):
         '''Set up.'''
         self.paths = helpers.Paths({'cloud_dir': '/tmp'})
+        self.dmi_data = util.read_dmi_data
         cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
             'no such file'
         # We have a different code path for arm to deal with LP1243287
@@ -253,16 +243,14 @@ class TestGetDataNoCloudInfoFile(TestCase):
         # Reset
         cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE = \
             '/etc/sysconfig/cloud-info'
-        cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
-            ['dmidecode', '--string', 'system-product-name']
+        util.read_dmi_data = self.dmi_data
         # Return back to original arch
         force_arch()
 
     def test_rhev_no_cloud_file(self):
         '''Test No cloud info file module get_data() forcing RHEV.'''
 
-        cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
-            ['echo', 'RHEV Hypervisor']
+        util.read_dmi_data = _dmi_data('RHEV Hypervisor')
         dsrc = DataSourceAltCloud({}, None, self.paths)
         dsrc.user_data_rhevm = lambda: True
         self.assertEquals(True, dsrc.get_data())
@@ -270,8 +258,7 @@ class TestGetDataNoCloudInfoFile(TestCase):
     def test_vsphere_no_cloud_file(self):
         '''Test No cloud info file module get_data() forcing VSPHERE.'''
 
-        cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
-            ['echo', 'VMware Virtual Platform']
+        util.read_dmi_data = _dmi_data('VMware Virtual Platform')
         dsrc = DataSourceAltCloud({}, None, self.paths)
         dsrc.user_data_vsphere = lambda: True
         self.assertEquals(True, dsrc.get_data())
@@ -279,8 +266,7 @@ class TestGetDataNoCloudInfoFile(TestCase):
     def test_failure_no_cloud_file(self):
         '''Test No cloud info file module get_data() forcing unrecognized.'''
 
-        cloudinit.sources.DataSourceAltCloud.CMD_DMI_SYSTEM = \
-            ['echo', 'Unrecognized Platform']
+        util.read_dmi_data = _dmi_data('Unrecognized Platform')
         dsrc = DataSourceAltCloud({}, None, self.paths)
         self.assertEquals(False, dsrc.get_data())
 
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 35e92445..6ae41bd6 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -310,4 +310,32 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
         expected = ('none', 'tmpfs', '/run/lock')
         self.assertEqual(expected, util.parse_mount_info('/run/lock', lines))
 
+
+class TestReadDMIData(helpers.FilesystemMockingTestCase):
+
+    def _patchIn(self, root):
+        self.restore()
+        self.patchOS(root)
+        self.patchUtils(root)
+
+    def _write_key(self, key, content):
+        new_root = self.makeDir()
+        self._patchIn(new_root)
+        util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id'))
+
+        dmi_key = "/sys/class/dmi/id/{}".format(key)
+        util.write_file(dmi_key, content)
+
+    def test_key(self):
+        key_content = "TEST-KEY-DATA"
+        self._write_key("key", key_content)
+        self.assertEquals(key_content, util.read_dmi_data("key"))
+
+    def test_key_mismatch(self):
+        self._write_key("test", "ABC")
+        self.assertNotEqual("123",  util.read_dmi_data("test"))
+
+    def test_no_key(self):
+        self.assertFalse(util.read_dmi_data("key"))
+
 # vi: ts=4 expandtab
-- 
cgit v1.2.3


From ae2f5123faab45e71e4dcf3237d7dac59a4c66b7 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 16 Jan 2015 13:21:08 -0500
Subject: pep8 fixes

---
 cloudinit/sources/DataSourceGCE.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index f877b3d1..2cf8fdcd 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -29,6 +29,7 @@ BUILTIN_DS_CONFIG = {
 }
 REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
 
+
 class DataSourceGCE(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -59,7 +60,8 @@ class DataSourceGCE(sources.DataSource):
             ('local-hostname', 'instance/hostname', True),
             ('public-keys', 'project/attributes/sshKeys', False),
             ('user-data', 'instance/attributes/user-data', False),
-            ('user-data-encoding', 'instance/attributes/user-data-encoding', False),
+            ('user-data-encoding', 'instance/attributes/user-data-encoding',
+             False),
         ]
 
         # if we cannot resolve the metadata server, then no point in trying
@@ -106,7 +108,8 @@ class DataSourceGCE(sources.DataSource):
         encoding = self.metadata.get('user-data-encoding')
         if encoding:
             if encoding == 'base64':
-                self.metadata['user-data'] = b64decode(self.metadata['user-data'])
+                self.metadata['user-data'] = b64decode(
+                    self.metadata['user-data'])
             else:
                 LOG.warn('unknown user-data-encoding: %s, ignoring', encoding)
 
-- 
cgit v1.2.3


From 8d453d2a4da4492857a4487b14fe7b11a014115b Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 16 Jan 2015 14:29:48 -0500
Subject: hostname: apply hostname same as is written

on RHEL, we were writing to persistent configuration the fqdn, but
invoking 'hostname' on the first boot with just the shortname.  On 'reboot',
then the hostname would differ.

Now, whatever we write, invoke hostname with.

Also remove some duplicate code.

LP: #1246485
---
 ChangeLog                     | 2 ++
 cloudinit/distros/__init__.py | 9 ++++++---
 cloudinit/distros/arch.py     | 7 -------
 cloudinit/distros/debian.py   | 7 -------
 cloudinit/distros/freebsd.py  | 5 -----
 cloudinit/distros/gentoo.py   | 7 -------
 cloudinit/distros/sles.py     | 7 -------
 7 files changed, 8 insertions(+), 36 deletions(-)

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index da2a5a0c..ddde383a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -18,6 +18,8 @@
  - GCE: Allow base64 encoded user-data (LP: #1404311) [Wayne Witzell III]
  - GCE: use short hostname rather than fqdn (LP: #1383794) [Ben Howard]
  - systemd: make init stage run before login prompts shown [Steve Langasek]
+ - hostname: on first boot apply hostname to be same as is written for
+   persistent hostname.  (LP: #1246485)
 0.7.6:
  - open 0.7.6
  - Enable vendordata on CloudSigma datasource (LP: #1303986)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index bf465442..5eab780b 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -86,7 +86,7 @@ class Distro(object):
     def set_hostname(self, hostname, fqdn=None):
         writeable_hostname = self._select_hostname(hostname, fqdn)
         self._write_hostname(writeable_hostname, self.hostname_conf_fn)
-        self._apply_hostname(hostname)
+        self._apply_hostname(writeable_hostname)
 
     @abc.abstractmethod
     def package_command(self, cmd, args=None, pkgs=None):
@@ -160,9 +160,12 @@ class Distro(object):
             util.logexc(LOG, "Failed to non-persistently adjust the system "
                         "hostname to %s", hostname)
 
-    @abc.abstractmethod
     def _select_hostname(self, hostname, fqdn):
-        raise NotImplementedError()
+        # Prefer the short hostname over the long
+        # fully qualified domain name
+        if not hostname:
+            return fqdn
+        return hostname
 
     @staticmethod
     def expand_osfamily(family_list):
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 005a0dd4..68bf1aab 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -118,13 +118,6 @@ class Distro(distros.Distro):
                 return False
         return True
 
-    def _select_hostname(self, hostname, fqdn):
-        # Prefer the short hostname over the long
-        # fully qualified domain name
-        if not hostname:
-            return fqdn
-        return hostname
-
     def _write_hostname(self, your_hostname, out_fn):
         conf = None
         try:
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 010be67d..b09eb094 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -86,13 +86,6 @@ class Distro(distros.Distro):
         else:
             return distros.Distro._bring_up_interfaces(self, device_names)
 
-    def _select_hostname(self, hostname, fqdn):
-        # Prefer the short hostname over the long
-        # fully qualified domain name
-        if not hostname:
-            return fqdn
-        return hostname
-
     def _write_hostname(self, your_hostname, out_fn):
         conf = None
         try:
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 9216510e..f1b4a256 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -150,11 +150,6 @@ class Distro(distros.Distro):
             return default
         return hostname
 
-    def _select_hostname(self, hostname, fqdn):
-        if not hostname:
-            return fqdn
-        return hostname
-
     def _write_hostname(self, hostname, filename):
         self.updatercconf('hostname', hostname)
 
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 45c2e658..09dd0d73 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -97,13 +97,6 @@ class Distro(distros.Distro):
         else:
             return distros.Distro._bring_up_interfaces(self, device_names)
 
-    def _select_hostname(self, hostname, fqdn):
-        # Prefer the short hostname over the long
-        # fully qualified domain name
-        if not hostname:
-            return fqdn
-        return hostname
-
     def _write_hostname(self, your_hostname, out_fn):
         conf = None
         try:
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
index 9788a1ba..43682a12 100644
--- a/cloudinit/distros/sles.py
+++ b/cloudinit/distros/sles.py
@@ -115,13 +115,6 @@ class Distro(distros.Distro):
         conf.set_hostname(hostname)
         util.write_file(out_fn, str(conf), 0644)
 
-    def _select_hostname(self, hostname, fqdn):
-        # Prefer the short hostname over the long
-        # fully qualified domain name
-        if not hostname:
-            return fqdn
-        return hostname
-
     def _read_system_hostname(self):
         host_fn = self.hostname_conf_fn
         return (host_fn, self._read_hostname(host_fn))
-- 
cgit v1.2.3


From 063d33bf8bb277744abab2c1fff44af665dc2545 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 20 Jan 2015 15:59:35 +0000
Subject: New Azure disk_setup default.

---
 cloudinit/sources/DataSourceAzure.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 09bc196d..2ba1e2ad 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -53,9 +53,9 @@ BUILTIN_DS_CONFIG = {
 
 BUILTIN_CLOUD_CONFIG = {
     'disk_setup': {
-        'ephemeral0': {'table_type': 'mbr',
-                       'layout': True,
-                       'overwrite': False},
+        'ephemeral0': {'table_type': 'gpt',
+                       'layout': [100],
+                       'overwrite': True},
         },
     'fs_setup': [{'filesystem': 'ext4',
                   'device': 'ephemeral0.1',
-- 
cgit v1.2.3


From cccc0ff012d2e7b5c238609b22cc064b519e54a5 Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Wed, 21 Jan 2015 15:35:56 -0500
Subject: Fix file modes to be Python 2/3 compatible.

---
 .bzrignore                                            | 1 +
 cloudinit/distros/__init__.py                         | 2 +-
 cloudinit/util.py                                     | 8 ++++----
 tests/unittests/test__init__.py                       | 2 +-
 tests/unittests/test_data.py                          | 2 +-
 tests/unittests/test_datasource/test_altcloud.py      | 2 +-
 tests/unittests/test_datasource/test_azure.py         | 2 +-
 tests/unittests/test_distros/test_netconfig.py        | 2 +-
 tests/unittests/test_handler/test_handler_ca_certs.py | 2 +-
 tests/unittests/test_handler/test_handler_growpart.py | 2 +-
 tests/unittests/test_runs/test_simple_run.py          | 2 +-
 tests/unittests/test_util.py                          | 6 +++---
 12 files changed, 17 insertions(+), 16 deletions(-)

(limited to 'cloudinit')

diff --git a/.bzrignore b/.bzrignore
index 32f5a949..926e4581 100644
--- a/.bzrignore
+++ b/.bzrignore
@@ -1,3 +1,4 @@
 .tox
 dist
 cloud_init.egg-info
+__pycache__
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 5eab780b..a913e15a 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -272,7 +272,7 @@ class Distro(object):
             if header:
                 contents.write("%s\n" % (header))
             contents.write("%s\n" % (eh))
-            util.write_file(self.hosts_fn, contents.getvalue(), mode=0644)
+            util.write_file(self.hosts_fn, contents.getvalue(), mode=0o644)
 
     def _bring_up_interface(self, device_name):
         cmd = ['ifup', device_name]
diff --git a/cloudinit/util.py b/cloudinit/util.py
index bf8e7d80..9efc704a 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1250,7 +1250,7 @@ def rename(src, dest):
     os.rename(src, dest)
 
 
-def ensure_dirs(dirlist, mode=0755):
+def ensure_dirs(dirlist, mode=0o755):
     for d in dirlist:
         ensure_dir(d, mode)
 
@@ -1264,7 +1264,7 @@ def read_write_cmdline_url(target_fn):
             return
         try:
             if key and content:
-                write_file(target_fn, content, mode=0600)
+                write_file(target_fn, content, mode=0o600)
                 LOG.debug(("Wrote to %s with contents of command line"
                           " url %s (len=%s)"), target_fn, url, len(content))
             elif key and not content:
@@ -1489,7 +1489,7 @@ def append_file(path, content):
     write_file(path, content, omode="ab", mode=None)
 
 
-def ensure_file(path, mode=0644):
+def ensure_file(path, mode=0o644):
     write_file(path, content='', omode="ab", mode=mode)
 
 
@@ -1507,7 +1507,7 @@ def chmod(path, mode):
             os.chmod(path, real_mode)
 
 
-def write_file(filename, content, mode=0644, omode="wb"):
+def write_file(filename, content, mode=0o644, omode="wb"):
     """
     Writes a file with the given content and sets the file mode as specified.
     Resotres the SELinux context if possible.
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index 17965488..48db1a5e 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -48,7 +48,7 @@ class TestWalkerHandleHandler(MockerTestCase):
         # Mock the write_file function
         write_file_mock = self.mocker.replace(util.write_file,
                                               passthrough=False)
-        write_file_mock(expected_file_fullname, self.payload, 0600)
+        write_file_mock(expected_file_fullname, self.payload, 0o600)
 
     def test_no_errors(self):
         """Payload gets written to file and added to C{pdata}."""
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index fd6bd8a1..5517f0b4 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -337,7 +337,7 @@ p: 1
 
         mock_write = self.mocker.replace("cloudinit.util.write_file",
                                          passthrough=False)
-        mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
+        mock_write(ci.paths.get_ipath("cloud_config"), "", 0o600)
         self.mocker.replay()
 
         log_file = self.capture_log(logging.WARNING)
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
index eaaa90e6..9d8a4a20 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -45,7 +45,7 @@ def _write_cloud_info_file(value):
     cifile = open(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 'w')
     cifile.write(value)
     cifile.close()
-    os.chmod(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 0664)
+    os.chmod(cloudinit.sources.DataSourceAltCloud.CLOUD_INFO_FILE, 0o664)
 
 
 def _remove_cloud_info_file():
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index e992a006..6e007a95 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -153,7 +153,7 @@ class TestAzureDataSource(MockerTestCase):
         ret = dsrc.get_data()
         self.assertTrue(ret)
         self.assertTrue(os.path.isdir(self.waagent_d))
-        self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0700)
+        self.assertEqual(stat.S_IMODE(os.stat(self.waagent_d).st_mode), 0o700)
 
     def test_user_cfg_set_agent_command_plain(self):
         # set dscfg in via plaintext
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index 193338e8..47de034b 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -96,7 +96,7 @@ class TestNetCfgDistro(MockerTestCase):
 
         write_bufs = {}
 
-        def replace_write(filename, content, mode=0644, omode="wb"):
+        def replace_write(filename, content, mode=0o644, omode="wb"):
             buf = WriteBuffer()
             buf.mode = mode
             buf.omode = omode
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index 0558023a..75df807e 100644
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
@@ -150,7 +150,7 @@ class TestAddCaCerts(MockerTestCase):
         mock_load = self.mocker.replace(util.load_file, passthrough=False)
 
         mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
-                   cert, mode=0644)
+                   cert, mode=0o644)
 
         mock_load("/etc/ca-certificates.conf")
         self.mocker.result(ca_certs_content)
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
index 5d0636d1..3056320d 100644
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ b/tests/unittests/test_handler/test_handler_growpart.py
@@ -145,7 +145,7 @@ class TestResize(MockerTestCase):
         # this patches out devent2dev, os.stat, and device_part_info
         # so in the end, doesn't test a lot
         devs = ["/dev/XXda1", "/dev/YYda2"]
-        devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5L,
+        devstat_ret = Bunch(st_mode=25008, st_ino=6078, st_dev=5,
                             st_nlink=1, st_uid=0, st_gid=6, st_size=0,
                             st_atime=0, st_mtime=0, st_ctime=0)
         enoent = ["/dev/NOENT"]
diff --git a/tests/unittests/test_runs/test_simple_run.py b/tests/unittests/test_runs/test_simple_run.py
index c9ba949e..2d51a337 100644
--- a/tests/unittests/test_runs/test_simple_run.py
+++ b/tests/unittests/test_runs/test_simple_run.py
@@ -41,7 +41,7 @@ class TestSimpleRun(helpers.FilesystemMockingTestCase):
                 {
                     'path': '/etc/blah.ini',
                     'content': 'blah',
-                    'permissions': 0755,
+                    'permissions': 0o755,
                 },
             ],
             'cloud_init_modules': ['write-files'],
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 35e92445..203445b7 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -79,7 +79,7 @@ class TestWriteFile(MockerTestCase):
             create_contents = f.read()
             self.assertEqual(contents, create_contents)
         file_stat = os.stat(path)
-        self.assertEqual(0644, stat.S_IMODE(file_stat.st_mode))
+        self.assertEqual(0o644, stat.S_IMODE(file_stat.st_mode))
 
     def test_dir_is_created_if_required(self):
         """Verifiy that directories are created is required."""
@@ -97,12 +97,12 @@ class TestWriteFile(MockerTestCase):
         path = os.path.join(self.tmp, "NewFile.txt")
         contents = "Hey there"
 
-        util.write_file(path, contents, mode=0666)
+        util.write_file(path, contents, mode=0o666)
 
         self.assertTrue(os.path.exists(path))
         self.assertTrue(os.path.isfile(path))
         file_stat = os.stat(path)
-        self.assertEqual(0666, stat.S_IMODE(file_stat.st_mode))
+        self.assertEqual(0o666, stat.S_IMODE(file_stat.st_mode))
 
     def test_custom_omode(self):
         """Verify custom omode works properly."""
-- 
cgit v1.2.3


From a64bb4febc79fcf641f6471d8cc00c74ca915f3d Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Wed, 21 Jan 2015 15:42:59 -0500
Subject: More octal literal fixes.

---
 cloudinit/distros/__init__.py                      |  6 ++---
 tests/unittests/test_data.py                       | 14 ++++++------
 tests/unittests/test_datasource/test_altcloud.py   |  4 ++--
 tests/unittests/test_distros/test_netconfig.py     | 26 +++++++++++-----------
 .../test_handler/test_handler_ca_certs.py          |  6 ++---
 5 files changed, 28 insertions(+), 28 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index a913e15a..49a0b652 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -468,7 +468,7 @@ class Distro(object):
                              util.make_header(base="added"),
                              "#includedir %s" % (path), '']
                     sudoers_contents = "\n".join(lines)
-                    util.write_file(sudo_base, sudoers_contents, 0440)
+                    util.write_file(sudo_base, sudoers_contents, 0o440)
                 else:
                     lines = ['', util.make_header(base="added"),
                              "#includedir %s" % (path), '']
@@ -478,7 +478,7 @@ class Distro(object):
             except IOError as e:
                 util.logexc(LOG, "Failed to write %s", sudo_base)
                 raise e
-        util.ensure_dir(path, 0750)
+        util.ensure_dir(path, 0o750)
 
     def write_sudo_rules(self, user, rules, sudo_file=None):
         if not sudo_file:
@@ -506,7 +506,7 @@ class Distro(object):
                 content,
             ]
             try:
-                util.write_file(sudo_file, "\n".join(contents), 0440)
+                util.write_file(sudo_file, "\n".join(contents), 0o440)
             except IOError as e:
                 util.logexc(LOG, "Failed to write sudoers file %s", sudo_file)
                 raise e
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 5517f0b4..03296e62 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -396,7 +396,7 @@ c: 4
 
         mock_write = self.mocker.replace("cloudinit.util.write_file",
                                          passthrough=False)
-        mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
+        mock_write(ci.paths.get_ipath("cloud_config"), "", 0o600)
         self.mocker.replay()
 
         log_file = self.capture_log(logging.WARNING)
@@ -415,8 +415,8 @@ c: 4
         outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
         mock_write = self.mocker.replace("cloudinit.util.write_file",
                                          passthrough=False)
-        mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
-        mock_write(outpath, script, 0700)
+        mock_write(ci.paths.get_ipath("cloud_config"), "", 0o600)
+        mock_write(outpath, script, 0o700)
         self.mocker.replay()
 
         log_file = self.capture_log(logging.WARNING)
@@ -435,8 +435,8 @@ c: 4
         outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
         mock_write = self.mocker.replace("cloudinit.util.write_file",
                                          passthrough=False)
-        mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
-        mock_write(outpath, script, 0700)
+        mock_write(ci.paths.get_ipath("cloud_config"), "", 0o600)
+        mock_write(outpath, script, 0o700)
         self.mocker.replay()
 
         log_file = self.capture_log(logging.WARNING)
@@ -455,8 +455,8 @@ c: 4
         outpath = os.path.join(ci.paths.get_ipath_cur("scripts"), "part-001")
         mock_write = self.mocker.replace("cloudinit.util.write_file",
                                          passthrough=False)
-        mock_write(outpath, script, 0700)
-        mock_write(ci.paths.get_ipath("cloud_config"), "", 0600)
+        mock_write(outpath, script, 0o700)
+        mock_write(ci.paths.get_ipath("cloud_config"), "", 0o600)
         self.mocker.replay()
 
         log_file = self.capture_log(logging.WARNING)
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
index 9d8a4a20..c74562d7 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -66,12 +66,12 @@ def _write_user_data_files(mount_dir, value):
     udfile = open(deltacloud_user_data_file, 'w')
     udfile.write(value)
     udfile.close()
-    os.chmod(deltacloud_user_data_file, 0664)
+    os.chmod(deltacloud_user_data_file, 0o664)
 
     udfile = open(user_data_file, 'w')
     udfile.write(value)
     udfile.close()
-    os.chmod(user_data_file, 0664)
+    os.chmod(user_data_file, 0o664)
 
 
 def _remove_user_data_files(mount_dir,
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index 47de034b..33a1d6e1 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -112,7 +112,7 @@ class TestNetCfgDistro(MockerTestCase):
         self.assertIn('/etc/network/interfaces', write_bufs)
         write_buf = write_bufs['/etc/network/interfaces']
         self.assertEquals(str(write_buf).strip(), BASE_NET_CFG.strip())
-        self.assertEquals(write_buf.mode, 0644)
+        self.assertEquals(write_buf.mode, 0o644)
 
     def assertCfgEquals(self, blob1, blob2):
         b1 = dict(SysConf(blob1.strip().splitlines()))
@@ -136,7 +136,7 @@ class TestNetCfgDistro(MockerTestCase):
 
         write_bufs = {}
 
-        def replace_write(filename, content, mode=0644, omode="wb"):
+        def replace_write(filename, content, mode=0o644, omode="wb"):
             buf = WriteBuffer()
             buf.mode = mode
             buf.omode = omode
@@ -169,7 +169,7 @@ DEVICE="lo"
 ONBOOT=yes
 '''
         self.assertCfgEquals(expected_buf, str(write_buf))
-        self.assertEquals(write_buf.mode, 0644)
+        self.assertEquals(write_buf.mode, 0o644)
 
         self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', write_bufs)
         write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0']
@@ -183,7 +183,7 @@ GATEWAY="192.168.1.254"
 BROADCAST="192.168.1.0"
 '''
         self.assertCfgEquals(expected_buf, str(write_buf))
-        self.assertEquals(write_buf.mode, 0644)
+        self.assertEquals(write_buf.mode, 0o644)
 
         self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', write_bufs)
         write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1']
@@ -193,7 +193,7 @@ BOOTPROTO="dhcp"
 ONBOOT=yes
 '''
         self.assertCfgEquals(expected_buf, str(write_buf))
-        self.assertEquals(write_buf.mode, 0644)
+        self.assertEquals(write_buf.mode, 0o644)
 
         self.assertIn('/etc/sysconfig/network', write_bufs)
         write_buf = write_bufs['/etc/sysconfig/network']
@@ -202,7 +202,7 @@ ONBOOT=yes
 NETWORKING=yes
 '''
         self.assertCfgEquals(expected_buf, str(write_buf))
-        self.assertEquals(write_buf.mode, 0644)
+        self.assertEquals(write_buf.mode, 0o644)
 
     def test_write_ipv6_rhel(self):
         rh_distro = self._get_distro('rhel')
@@ -215,7 +215,7 @@ NETWORKING=yes
 
         write_bufs = {}
 
-        def replace_write(filename, content, mode=0644, omode="wb"):
+        def replace_write(filename, content, mode=0o644, omode="wb"):
             buf = WriteBuffer()
             buf.mode = mode
             buf.omode = omode
@@ -248,7 +248,7 @@ DEVICE="lo"
 ONBOOT=yes
 '''
         self.assertCfgEquals(expected_buf, str(write_buf))
-        self.assertEquals(write_buf.mode, 0644)
+        self.assertEquals(write_buf.mode, 0o644)
 
         self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth0', write_bufs)
         write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth0']
@@ -265,7 +265,7 @@ IPV6ADDR="2607:f0d0:1002:0011::2"
 IPV6_DEFAULTGW="2607:f0d0:1002:0011::1"
 '''
         self.assertCfgEquals(expected_buf, str(write_buf))
-        self.assertEquals(write_buf.mode, 0644)
+        self.assertEquals(write_buf.mode, 0o644)
         self.assertIn('/etc/sysconfig/network-scripts/ifcfg-eth1', write_bufs)
         write_buf = write_bufs['/etc/sysconfig/network-scripts/ifcfg-eth1']
         expected_buf = '''
@@ -281,7 +281,7 @@ IPV6ADDR="2607:f0d0:1002:0011::3"
 IPV6_DEFAULTGW="2607:f0d0:1002:0011::1"
 '''
         self.assertCfgEquals(expected_buf, str(write_buf))
-        self.assertEquals(write_buf.mode, 0644)
+        self.assertEquals(write_buf.mode, 0o644)
 
         self.assertIn('/etc/sysconfig/network', write_bufs)
         write_buf = write_bufs['/etc/sysconfig/network']
@@ -292,7 +292,7 @@ NETWORKING_IPV6=yes
 IPV6_AUTOCONF=no
 '''
         self.assertCfgEquals(expected_buf, str(write_buf))
-        self.assertEquals(write_buf.mode, 0644)
+        self.assertEquals(write_buf.mode, 0o644)
 
     def test_simple_write_freebsd(self):
         fbsd_distro = self._get_distro('freebsd')
@@ -319,7 +319,7 @@ IPV6_AUTOCONF=no
             '/etc/resolv.conf': '',
         }
 
-        def replace_write(filename, content, mode=0644, omode="wb"):
+        def replace_write(filename, content, mode=0o644, omode="wb"):
             buf = WriteBuffer()
             buf.mode = mode
             buf.omode = omode
@@ -355,4 +355,4 @@ ifconfig_vtnet1="DHCP"
 defaultrouter="192.168.1.254"
 '''
         self.assertCfgEquals(expected_buf, str(write_buf))
-        self.assertEquals(write_buf.mode, 0644)
+        self.assertEquals(write_buf.mode, 0o644)
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index 75df807e..7fe47b74 100644
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
@@ -171,7 +171,7 @@ class TestAddCaCerts(MockerTestCase):
         mock_load = self.mocker.replace(util.load_file, passthrough=False)
 
         mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
-                   cert, mode=0644)
+                   cert, mode=0o644)
 
         mock_load("/etc/ca-certificates.conf")
         self.mocker.result(ca_certs_content)
@@ -192,7 +192,7 @@ class TestAddCaCerts(MockerTestCase):
         mock_load = self.mocker.replace(util.load_file, passthrough=False)
 
         mock_write("/usr/share/ca-certificates/cloud-init-ca-certs.crt",
-                   expected_cert_file, mode=0644)
+                   expected_cert_file, mode=0o644)
 
         ca_certs_content = "line1\nline2\nline3"
         mock_load("/etc/ca-certificates.conf")
@@ -233,7 +233,7 @@ class TestRemoveDefaultCaCerts(MockerTestCase):
 
         mock_delete_dir_contents("/usr/share/ca-certificates/")
         mock_delete_dir_contents("/etc/ssl/certs/")
-        mock_write("/etc/ca-certificates.conf", "", mode=0644)
+        mock_write("/etc/ca-certificates.conf", "", mode=0o644)
         mock_subp(('debconf-set-selections', '-'),
                   "ca-certificates ca-certificates/trust_new_crts select no")
         self.mocker.replay()
-- 
cgit v1.2.3


From c80892c9c326716724c3ff06d9a82516a4152c74 Mon Sep 17 00:00:00 2001
From: Ben Howard <ben.howard@canonical.com>
Date: Wed, 21 Jan 2015 15:42:55 -0700
Subject: Use either syspath or dmidecode based on the availability.

---
 cloudinit/util.py            | 35 ++++++++++++++++++++++++++++++++++-
 tests/unittests/test_util.py | 30 +++++++++++++++++++++++++++++-
 2 files changed, 63 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index f7498b01..26456aa6 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2016,7 +2016,7 @@ def human2bytes(size):
     return int(num * mpliers[mplier])
 
 
-def read_dmi_data(key):
+def _read_dmi_syspath(key):
     """
     Reads dmi data with from /sys/class/dmi/id
     """
@@ -2039,3 +2039,36 @@ def read_dmi_data(key):
     except Exception as e:
         logexc(LOG, "failed read of {}".format(dmi_key), e)
         return None
+
+
+def _call_dmidecode(key, dmidecode_path):
+    """
+    Calls out to dmidecode to get the data out. This is mostly for supporting
+    OS's without /sys/class/dmi/id support.
+    """
+    try:
+        cmd = [dmidecode_path, "--string", key]
+        (result, _err) = subp(cmd)
+        LOG.debug("dmidecode returned '{}' for '{}'".format(result, key))
+        return result
+    except OSError as _err:
+        LOG.debug('failed dmidecode cmd: {}\n{}'.format(cmd, _err))
+        return None
+
+
+def read_dmi_data(key):
+    """
+    Wrapper for reading DMI data. This tries to determine whether the DMI
+    Data can be read directly, otherwise it will fallback to using dmidecode.
+    """
+    if os.path.exists(DMI_SYS_PATH):
+        return _read_dmi_syspath(key)
+
+    dmidecode_path = which('dmidecode')
+    if dmidecode_path:
+        return _call_dmidecode(key, dmidecode_path)
+
+    LOG.warn("did not find either path {} or dmidecode command".format(
+             DMI_SYS_PATH))
+
+    return None
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 6ae41bd6..3e079131 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -319,6 +319,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
         self.patchUtils(root)
 
     def _write_key(self, key, content):
+        """Mocks the sys path found on Linux systems."""
         new_root = self.makeDir()
         self._patchIn(new_root)
         util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id'))
@@ -326,6 +327,24 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
         dmi_key = "/sys/class/dmi/id/{}".format(key)
         util.write_file(dmi_key, content)
 
+    def _no_syspath(self, key, content):
+        """
+        In order to test a missing sys path and call outs to dmidecode, this
+        function fakes the results of dmidecode to test the results.
+        """
+        new_root = self.makeDir()
+        self._patchIn(new_root)
+        self.real_which = util.which
+        self.real_subp = util.subp
+
+        def _which(key):
+            return True
+        util.which = _which
+
+        def _cdd(_key, error=None):
+            return (content, error)
+        util.subp = _cdd
+
     def test_key(self):
         key_content = "TEST-KEY-DATA"
         self._write_key("key", key_content)
@@ -333,9 +352,18 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
 
     def test_key_mismatch(self):
         self._write_key("test", "ABC")
-        self.assertNotEqual("123",  util.read_dmi_data("test"))
+        self.assertNotEqual("123", util.read_dmi_data("test"))
 
     def test_no_key(self):
+        self._no_syspath(None, None)
         self.assertFalse(util.read_dmi_data("key"))
 
+    def test_callout_dmidecode(self):
+        """test to make sure that dmidecode is used when no syspath"""
+        self._no_syspath("key", "stuff")
+        self.assertEquals("stuff", util.read_dmi_data("key"))
+        self._no_syspath("key", None)
+        self.assertFalse(util.read_dmi_data("key"))
+
+
 # vi: ts=4 expandtab
-- 
cgit v1.2.3


From f895cb12141281702b34da18f2384deb64c881e7 Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Wed, 21 Jan 2015 17:56:53 -0500
Subject: Largely merge lp:~harlowja/cloud-init/py2-3 albeit manually because
 it seemed to be behind trunk.

`tox -e py27` passes full test suite.  Now to work on replacing mocker.
---
 cloudinit/config/cc_apt_configure.py               |   2 +-
 cloudinit/config/cc_debug.py                       |   7 +-
 cloudinit/config/cc_landscape.py                   |   2 +-
 cloudinit/config/cc_mcollective.py                 |  15 +--
 cloudinit/config/cc_phone_home.py                  |   4 +-
 cloudinit/config/cc_puppet.py                      |   8 +-
 cloudinit/config/cc_resolv_conf.py                 |   4 +-
 cloudinit/config/cc_seed_random.py                 |   3 +-
 cloudinit/config/cc_ssh.py                         |  16 +--
 cloudinit/config/cc_yum_add_repo.py                |   7 +-
 cloudinit/distros/__init__.py                      |  55 ++++++-----
 cloudinit/distros/arch.py                          |   2 +-
 cloudinit/distros/freebsd.py                       |  12 ++-
 cloudinit/distros/net_util.py                      |   2 +-
 cloudinit/distros/parsers/hostname.py              |   2 +-
 cloudinit/distros/parsers/hosts.py                 |   2 +-
 cloudinit/distros/parsers/resolv_conf.py           |   2 +-
 cloudinit/distros/parsers/sys_conf.py              |   5 +-
 cloudinit/distros/rhel.py                          |   2 +-
 cloudinit/distros/sles.py                          |   2 +-
 cloudinit/ec2_utils.py                             |   9 +-
 cloudinit/handlers/__init__.py                     |   2 +-
 cloudinit/handlers/boot_hook.py                    |   2 +-
 cloudinit/handlers/cloud_config.py                 |   2 +-
 cloudinit/handlers/shell_script.py                 |   2 +-
 cloudinit/handlers/upstart_job.py                  |   2 +-
 cloudinit/helpers.py                               |  13 +--
 cloudinit/log.py                                   |   7 +-
 cloudinit/mergers/__init__.py                      |   4 +-
 cloudinit/mergers/m_dict.py                        |   4 +-
 cloudinit/mergers/m_list.py                        |   6 +-
 cloudinit/mergers/m_str.py                         |  10 +-
 cloudinit/netinfo.py                               |   4 +-
 cloudinit/signal_handler.py                        |   2 +-
 cloudinit/sources/DataSourceConfigDrive.py         |   4 +-
 cloudinit/sources/DataSourceDigitalOcean.py        |   9 +-
 cloudinit/sources/DataSourceEc2.py                 |   4 +-
 cloudinit/sources/DataSourceMAAS.py                |   2 +-
 cloudinit/sources/DataSourceOVF.py                 |   6 +-
 cloudinit/sources/DataSourceSmartOS.py             |  15 +--
 cloudinit/sources/__init__.py                      |  10 +-
 cloudinit/sources/helpers/openstack.py             |  10 +-
 cloudinit/ssh_util.py                              |   6 +-
 cloudinit/stages.py                                |  23 ++---
 cloudinit/type_utils.py                            |  32 ++++--
 cloudinit/url_helper.py                            |  22 +++--
 cloudinit/user_data.py                             |   8 +-
 cloudinit/util.py                                  | 109 +++++++++++++--------
 packages/bddeb                                     |   1 +
 packages/brpm                                      |   2 +
 tests/unittests/test_data.py                       |  12 +--
 tests/unittests/test_datasource/test_nocloud.py    |   2 +-
 tests/unittests/test_datasource/test_openstack.py  |   7 +-
 tests/unittests/test_distros/test_netconfig.py     |   4 +-
 .../test_handler/test_handler_apt_configure.py     |  10 +-
 .../unittests/test_handler/test_handler_locale.py  |   6 +-
 .../test_handler/test_handler_seed_random.py       |   2 +-
 .../test_handler/test_handler_set_hostname.py      |   6 +-
 .../test_handler/test_handler_timezone.py          |   6 +-
 .../test_handler/test_handler_yum_add_repo.py      |   7 +-
 60 files changed, 315 insertions(+), 233 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index f10b76a3..de72903f 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -126,7 +126,7 @@ def mirror2lists_fileprefix(mirror):
 
 
 def rename_apt_lists(old_mirrors, new_mirrors, lists_d="/var/lib/apt/lists"):
-    for (name, omirror) in old_mirrors.iteritems():
+    for (name, omirror) in old_mirrors.items():
         nmirror = new_mirrors.get(name)
         if not nmirror:
             continue
diff --git a/cloudinit/config/cc_debug.py b/cloudinit/config/cc_debug.py
index 8c489426..bdc32fe6 100644
--- a/cloudinit/config/cc_debug.py
+++ b/cloudinit/config/cc_debug.py
@@ -34,7 +34,8 @@ It can be configured with the following option structure::
 """
 
 import copy
-from StringIO import StringIO
+
+from six import StringIO
 
 from cloudinit import type_utils
 from cloudinit import util
@@ -77,7 +78,7 @@ def handle(name, cfg, cloud, log, args):
     dump_cfg = copy.deepcopy(cfg)
     for k in SKIP_KEYS:
         dump_cfg.pop(k, None)
-    all_keys = list(dump_cfg.keys())
+    all_keys = list(dump_cfg)
     for k in all_keys:
         if k.startswith("_"):
             dump_cfg.pop(k, None)
@@ -103,6 +104,6 @@ def handle(name, cfg, cloud, log, args):
         line = "ci-info: %s\n" % (line)
         content_to_file.append(line)
     if out_file:
-        util.write_file(out_file, "".join(content_to_file), 0644, "w")
+        util.write_file(out_file, "".join(content_to_file), 0o644, "w")
     else:
         util.multi_log("".join(content_to_file), console=True, stderr=False)
diff --git a/cloudinit/config/cc_landscape.py b/cloudinit/config/cc_landscape.py
index 8a709677..0b9d846e 100644
--- a/cloudinit/config/cc_landscape.py
+++ b/cloudinit/config/cc_landscape.py
@@ -20,7 +20,7 @@
 
 import os
 
-from StringIO import StringIO
+from six import StringIO
 
 from configobj import ConfigObj
 
diff --git a/cloudinit/config/cc_mcollective.py b/cloudinit/config/cc_mcollective.py
index b670390d..425420ae 100644
--- a/cloudinit/config/cc_mcollective.py
+++ b/cloudinit/config/cc_mcollective.py
@@ -19,7 +19,8 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+import six
+from six import StringIO
 
 # Used since this can maintain comments
 # and doesn't need a top level section
@@ -51,17 +52,17 @@ def handle(name, cfg, cloud, log, _args):
         # original file in order to be able to mix the rest up
         mcollective_config = ConfigObj(SERVER_CFG)
         # See: http://tiny.cc/jh9agw
-        for (cfg_name, cfg) in mcollective_cfg['conf'].iteritems():
+        for (cfg_name, cfg) in mcollective_cfg['conf'].items():
             if cfg_name == 'public-cert':
-                util.write_file(PUBCERT_FILE, cfg, mode=0644)
+                util.write_file(PUBCERT_FILE, cfg, mode=0o644)
                 mcollective_config['plugin.ssl_server_public'] = PUBCERT_FILE
                 mcollective_config['securityprovider'] = 'ssl'
             elif cfg_name == 'private-cert':
-                util.write_file(PRICERT_FILE, cfg, mode=0600)
+                util.write_file(PRICERT_FILE, cfg, mode=0o600)
                 mcollective_config['plugin.ssl_server_private'] = PRICERT_FILE
                 mcollective_config['securityprovider'] = 'ssl'
             else:
-                if isinstance(cfg, (basestring, str)):
+                if isinstance(cfg, six.string_types):
                     # Just set it in the 'main' section
                     mcollective_config[cfg_name] = cfg
                 elif isinstance(cfg, (dict)):
@@ -69,7 +70,7 @@ def handle(name, cfg, cloud, log, _args):
                     # if it is needed and then add/or create items as needed
                     if cfg_name not in mcollective_config.sections:
                         mcollective_config[cfg_name] = {}
-                    for (o, v) in cfg.iteritems():
+                    for (o, v) in cfg.items():
                         mcollective_config[cfg_name][o] = v
                 else:
                     # Otherwise just try to convert it to a string
@@ -81,7 +82,7 @@ def handle(name, cfg, cloud, log, _args):
         contents = StringIO()
         mcollective_config.write(contents)
         contents = contents.getvalue()
-        util.write_file(SERVER_CFG, contents, mode=0644)
+        util.write_file(SERVER_CFG, contents, mode=0o644)
 
     # Start mcollective
     util.subp(['service', 'mcollective', 'start'], capture=False)
diff --git a/cloudinit/config/cc_phone_home.py b/cloudinit/config/cc_phone_home.py
index 5bc68b83..18a7ddad 100644
--- a/cloudinit/config/cc_phone_home.py
+++ b/cloudinit/config/cc_phone_home.py
@@ -81,7 +81,7 @@ def handle(name, cfg, cloud, log, args):
         'pub_key_ecdsa': '/etc/ssh/ssh_host_ecdsa_key.pub',
     }
 
-    for (n, path) in pubkeys.iteritems():
+    for (n, path) in pubkeys.items():
         try:
             all_keys[n] = util.load_file(path)
         except:
@@ -99,7 +99,7 @@ def handle(name, cfg, cloud, log, args):
 
     # Get them read to be posted
     real_submit_keys = {}
-    for (k, v) in submit_keys.iteritems():
+    for (k, v) in submit_keys.items():
         if v is None:
             real_submit_keys[k] = 'N/A'
         else:
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 471a1a8a..6f1b3c57 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -18,7 +18,7 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+from six import StringIO
 
 import os
 import socket
@@ -81,13 +81,13 @@ def handle(name, cfg, cloud, log, _args):
         cleaned_contents = '\n'.join(cleaned_lines)
         puppet_config.readfp(StringIO(cleaned_contents),
                              filename=PUPPET_CONF_PATH)
-        for (cfg_name, cfg) in puppet_cfg['conf'].iteritems():
+        for (cfg_name, cfg) in puppet_cfg['conf'].items():
             # Cert configuration is a special case
             # Dump the puppet master ca certificate in the correct place
             if cfg_name == 'ca_cert':
                 # Puppet ssl sub-directory isn't created yet
                 # Create it with the proper permissions and ownership
-                util.ensure_dir(PUPPET_SSL_DIR, 0771)
+                util.ensure_dir(PUPPET_SSL_DIR, 0o771)
                 util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
                 util.ensure_dir(PUPPET_SSL_CERT_DIR)
                 util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
@@ -96,7 +96,7 @@ def handle(name, cfg, cloud, log, _args):
             else:
                 # Iterate throug the config items, we'll use ConfigParser.set
                 # to overwrite or create new items as needed
-                for (o, v) in cfg.iteritems():
+                for (o, v) in cfg.items():
                     if o == 'certname':
                         # Expand %f as the fqdn
                         # TODO(harlowja) should this use the cloud fqdn??
diff --git a/cloudinit/config/cc_resolv_conf.py b/cloudinit/config/cc_resolv_conf.py
index bbaa6c63..71d9e3a7 100644
--- a/cloudinit/config/cc_resolv_conf.py
+++ b/cloudinit/config/cc_resolv_conf.py
@@ -66,8 +66,8 @@ def generate_resolv_conf(template_fn, params, target_fname="/etc/resolv.conf"):
     false_flags = []
 
     if 'options' in params:
-        for key, val in params['options'].iteritems():
-            if type(val) == bool:
+        for key, val in params['options'].items():
+            if isinstance(val, bool):
                 if val:
                     flags.append(key)
                 else:
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 49a6b3e8..3b7235bf 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -21,7 +21,8 @@
 
 import base64
 import os
-from StringIO import StringIO
+
+from six import StringIO
 
 from cloudinit.settings import PER_INSTANCE
 from cloudinit import log as logging
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 4c76581c..ab6940fa 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -34,12 +34,12 @@ DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding,"
 "rather than the user \\\"root\\\".\';echo;sleep 10\"")
 
 KEY_2_FILE = {
-    "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0600),
-    "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0644),
-    "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0600),
-    "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0644),
-    "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0600),
-    "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0644),
+    "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0o600),
+    "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0o644),
+    "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0o600),
+    "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0o644),
+    "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0o600),
+    "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0o644),
 }
 
 PRIV_2_PUB = {
@@ -68,13 +68,13 @@ def handle(_name, cfg, cloud, log, _args):
 
     if "ssh_keys" in cfg:
         # if there are keys in cloud-config, use them
-        for (key, val) in cfg["ssh_keys"].iteritems():
+        for (key, val) in cfg["ssh_keys"].items():
             if key in KEY_2_FILE:
                 tgt_fn = KEY_2_FILE[key][0]
                 tgt_perms = KEY_2_FILE[key][1]
                 util.write_file(tgt_fn, val, tgt_perms)
 
-        for (priv, pub) in PRIV_2_PUB.iteritems():
+        for (priv, pub) in PRIV_2_PUB.items():
             if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                 continue
             pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 0d836f28..3b821af9 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -18,9 +18,10 @@
 
 import os
 
-from cloudinit import util
-
 import configobj
+import six
+
+from cloudinit import util
 
 
 def _canonicalize_id(repo_id):
@@ -37,7 +38,7 @@ def _format_repo_value(val):
         # Can handle 'lists' in certain cases
         # See: http://bit.ly/Qqrf1t
         return "\n    ".join([_format_repo_value(v) for v in val])
-    if not isinstance(val, (basestring, str)):
+    if not isinstance(val, six.string_types):
         return str(val)
     return val
 
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 49a0b652..4ebccdda 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -21,7 +21,8 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+import six
+from six import StringIO
 
 import abc
 import itertools
@@ -334,7 +335,7 @@ class Distro(object):
         redact_opts = ['passwd']
 
         # Check the values and create the command
-        for key, val in kwargs.iteritems():
+        for key, val in kwargs.items():
 
             if key in adduser_opts and val and isinstance(val, str):
                 adduser_cmd.extend([adduser_opts[key], val])
@@ -393,7 +394,7 @@ class Distro(object):
         if 'ssh_authorized_keys' in kwargs:
             # Try to handle this in a smart manner.
             keys = kwargs['ssh_authorized_keys']
-            if isinstance(keys, (basestring, str)):
+            if isinstance(keys, six.string_types):
                 keys = [keys]
             if isinstance(keys, dict):
                 keys = list(keys.values())
@@ -491,7 +492,7 @@ class Distro(object):
         if isinstance(rules, (list, tuple)):
             for rule in rules:
                 lines.append("%s %s" % (user, rule))
-        elif isinstance(rules, (basestring, str)):
+        elif isinstance(rules, six.string_types):
             lines.append("%s %s" % (user, rules))
         else:
             msg = "Can not create sudoers rule addition with type %r"
@@ -561,10 +562,10 @@ def _get_package_mirror_info(mirror_info, availability_zone=None,
         subst['ec2_region'] = "%s" % availability_zone[0:-1]
 
     results = {}
-    for (name, mirror) in mirror_info.get('failsafe', {}).iteritems():
+    for (name, mirror) in mirror_info.get('failsafe', {}).items():
         results[name] = mirror
 
-    for (name, searchlist) in mirror_info.get('search', {}).iteritems():
+    for (name, searchlist) in mirror_info.get('search', {}).items():
         mirrors = []
         for tmpl in searchlist:
             try:
@@ -604,30 +605,30 @@ def _get_arch_package_mirror_info(package_mirrors, arch):
 # is the standard form used in the rest
 # of cloud-init
 def _normalize_groups(grp_cfg):
-    if isinstance(grp_cfg, (str, basestring)):
+    if isinstance(grp_cfg, six.string_types):
         grp_cfg = grp_cfg.strip().split(",")
-    if isinstance(grp_cfg, (list)):
+    if isinstance(grp_cfg, list):
         c_grp_cfg = {}
         for i in grp_cfg:
-            if isinstance(i, (dict)):
+            if isinstance(i, dict):
                 for k, v in i.items():
                     if k not in c_grp_cfg:
-                        if isinstance(v, (list)):
+                        if isinstance(v, list):
                             c_grp_cfg[k] = list(v)
-                        elif isinstance(v, (basestring, str)):
+                        elif isinstance(v, six.string_types):
                             c_grp_cfg[k] = [v]
                         else:
                             raise TypeError("Bad group member type %s" %
                                             type_utils.obj_name(v))
                     else:
-                        if isinstance(v, (list)):
+                        if isinstance(v, list):
                             c_grp_cfg[k].extend(v)
-                        elif isinstance(v, (basestring, str)):
+                        elif isinstance(v, six.string_types):
                             c_grp_cfg[k].append(v)
                         else:
                             raise TypeError("Bad group member type %s" %
                                             type_utils.obj_name(v))
-            elif isinstance(i, (str, basestring)):
+            elif isinstance(i, six.string_types):
                 if i not in c_grp_cfg:
                     c_grp_cfg[i] = []
             else:
@@ -635,7 +636,7 @@ def _normalize_groups(grp_cfg):
                                 type_utils.obj_name(i))
         grp_cfg = c_grp_cfg
     groups = {}
-    if isinstance(grp_cfg, (dict)):
+    if isinstance(grp_cfg, dict):
         for (grp_name, grp_members) in grp_cfg.items():
             groups[grp_name] = util.uniq_merge_sorted(grp_members)
     else:
@@ -661,29 +662,29 @@ def _normalize_groups(grp_cfg):
 # entry 'default' which will be marked as true
 # all other users will be marked as false.
 def _normalize_users(u_cfg, def_user_cfg=None):
-    if isinstance(u_cfg, (dict)):
+    if isinstance(u_cfg, dict):
         ad_ucfg = []
         for (k, v) in u_cfg.items():
-            if isinstance(v, (bool, int, basestring, str, float)):
+            if isinstance(v, (bool, int, float) + six.string_types):
                 if util.is_true(v):
                     ad_ucfg.append(str(k))
-            elif isinstance(v, (dict)):
+            elif isinstance(v, dict):
                 v['name'] = k
                 ad_ucfg.append(v)
             else:
                 raise TypeError(("Unmappable user value type %s"
                                  " for key %s") % (type_utils.obj_name(v), k))
         u_cfg = ad_ucfg
-    elif isinstance(u_cfg, (str, basestring)):
+    elif isinstance(u_cfg, six.string_types):
         u_cfg = util.uniq_merge_sorted(u_cfg)
 
     users = {}
     for user_config in u_cfg:
-        if isinstance(user_config, (str, basestring, list)):
+        if isinstance(user_config, (list,) + six.string_types):
             for u in util.uniq_merge(user_config):
                 if u and u not in users:
                     users[u] = {}
-        elif isinstance(user_config, (dict)):
+        elif isinstance(user_config, dict):
             if 'name' in user_config:
                 n = user_config.pop('name')
                 prev_config = users.get(n) or {}
@@ -784,11 +785,11 @@ def normalize_users_groups(cfg, distro):
         old_user = cfg['user']
         # Translate it into the format that is more useful
         # going forward
-        if isinstance(old_user, (basestring, str)):
+        if isinstance(old_user, six.string_types):
             old_user = {
                 'name': old_user,
             }
-        if not isinstance(old_user, (dict)):
+        if not isinstance(old_user, dict):
             LOG.warn(("Format for 'user' key must be a string or "
                       "dictionary and not %s"), type_utils.obj_name(old_user))
             old_user = {}
@@ -813,7 +814,7 @@ def normalize_users_groups(cfg, distro):
     default_user_config = util.mergemanydict([old_user, distro_user_config])
 
     base_users = cfg.get('users', [])
-    if not isinstance(base_users, (list, dict, str, basestring)):
+    if not isinstance(base_users, (list, dict) + six.string_types):
         LOG.warn(("Format for 'users' key must be a comma separated string"
                   " or a dictionary or a list and not %s"),
                  type_utils.obj_name(base_users))
@@ -822,12 +823,12 @@ def normalize_users_groups(cfg, distro):
     if old_user:
         # Ensure that when user: is provided that this user
         # always gets added (as the default user)
-        if isinstance(base_users, (list)):
+        if isinstance(base_users, list):
             # Just add it on at the end...
             base_users.append({'name': 'default'})
-        elif isinstance(base_users, (dict)):
+        elif isinstance(base_users, dict):
             base_users['default'] = dict(base_users).get('default', True)
-        elif isinstance(base_users, (str, basestring)):
+        elif isinstance(base_users, six.string_types):
             # Just append it on to be re-parsed later
             base_users += ",default"
 
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 68bf1aab..e540e0bc 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -66,7 +66,7 @@ class Distro(distros.Distro):
                   settings, entries)
         dev_names = entries.keys()
         # Format for netctl
-        for (dev, info) in entries.iteritems():
+        for (dev, info) in entries.items():
             nameservers = []
             net_fn = self.network_conf_dir + dev
             net_cfg = {
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index f1b4a256..4c484639 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -16,7 +16,8 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+import six
+from six import StringIO
 
 import re
 
@@ -203,8 +204,9 @@ class Distro(distros.Distro):
 
         redact_opts = ['passwd']
 
-        for key, val in kwargs.iteritems():
-            if key in adduser_opts and val and isinstance(val, basestring):
+        for key, val in kwargs.items():
+            if (key in adduser_opts and val
+                    and isinstance(val, six.string_types)):
                 adduser_cmd.extend([adduser_opts[key], val])
 
                 # Redact certain fields from the logs
@@ -271,7 +273,7 @@ class Distro(distros.Distro):
         nameservers = []
         searchdomains = []
         dev_names = entries.keys()
-        for (device, info) in entries.iteritems():
+        for (device, info) in entries.items():
             # Skip the loopback interface.
             if device.startswith('lo'):
                 continue
@@ -323,7 +325,7 @@ class Distro(distros.Distro):
                 resolvconf.add_search_domain(domain)
             except ValueError:
                 util.logexc(LOG, "Failed to add search domain %s", domain)
-        util.write_file(self.resolv_conf_fn, str(resolvconf), 0644)
+        util.write_file(self.resolv_conf_fn, str(resolvconf), 0o644)
 
         return dev_names
 
diff --git a/cloudinit/distros/net_util.py b/cloudinit/distros/net_util.py
index 8b28e2d1..cadfa6b6 100644
--- a/cloudinit/distros/net_util.py
+++ b/cloudinit/distros/net_util.py
@@ -103,7 +103,7 @@ def translate_network(settings):
             consume[cmd] = args
     # Check if anything left over to consume
     absorb = False
-    for (cmd, args) in consume.iteritems():
+    for (cmd, args) in consume.items():
         if cmd == 'iface':
             absorb = True
     if absorb:
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
index 617b3c36..84a1de42 100644
--- a/cloudinit/distros/parsers/hostname.py
+++ b/cloudinit/distros/parsers/hostname.py
@@ -16,7 +16,7 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+from six import StringIO
 
 from cloudinit.distros.parsers import chop_comment
 
diff --git a/cloudinit/distros/parsers/hosts.py b/cloudinit/distros/parsers/hosts.py
index 94c97051..3c5498ee 100644
--- a/cloudinit/distros/parsers/hosts.py
+++ b/cloudinit/distros/parsers/hosts.py
@@ -16,7 +16,7 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+from six import StringIO
 
 from cloudinit.distros.parsers import chop_comment
 
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 5733c25a..8aee03a4 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -16,7 +16,7 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+from six import StringIO
 
 from cloudinit import util
 
diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py
index 20ca1871..d795e12f 100644
--- a/cloudinit/distros/parsers/sys_conf.py
+++ b/cloudinit/distros/parsers/sys_conf.py
@@ -16,7 +16,8 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
+import six
+from six import StringIO
 
 import pipes
 import re
@@ -69,7 +70,7 @@ class SysConf(configobj.ConfigObj):
         return out_contents.getvalue()
 
     def _quote(self, value, multiline=False):
-        if not isinstance(value, (str, basestring)):
+        if not isinstance(value, six.string_types):
             raise ValueError('Value "%s" is not a string' % (value))
         if len(value) == 0:
             return ''
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index d9588632..7408989c 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -73,7 +73,7 @@ class Distro(distros.Distro):
         searchservers = []
         dev_names = entries.keys()
         use_ipv6 = False
-        for (dev, info) in entries.iteritems():
+        for (dev, info) in entries.items():
             net_fn = self.network_script_tpl % (dev)
             net_cfg = {
                 'DEVICE': dev,
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
index 43682a12..0c6d1203 100644
--- a/cloudinit/distros/sles.py
+++ b/cloudinit/distros/sles.py
@@ -62,7 +62,7 @@ class Distro(distros.Distro):
         nameservers = []
         searchservers = []
         dev_names = entries.keys()
-        for (dev, info) in entries.iteritems():
+        for (dev, info) in entries.items():
             net_fn = self.network_script_tpl % (dev)
             mode = info.get('auto')
             if mode and mode.lower() == 'true':
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index e69d06ff..e1ed4091 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -17,7 +17,6 @@
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import functools
-import httplib
 import json
 
 from cloudinit import log as logging
@@ -25,7 +24,7 @@ from cloudinit import url_helper
 from cloudinit import util
 
 LOG = logging.getLogger(__name__)
-SKIP_USERDATA_CODES = frozenset([httplib.NOT_FOUND])
+SKIP_USERDATA_CODES = frozenset([url_helper.NOT_FOUND])
 
 
 class MetadataLeafDecoder(object):
@@ -123,7 +122,7 @@ class MetadataMaterializer(object):
         leaf_contents = {}
         for (field, resource) in leaves.items():
             leaf_url = url_helper.combine_url(base_url, resource)
-            leaf_blob = str(self._caller(leaf_url))
+            leaf_blob = self._caller(leaf_url).contents
             leaf_contents[field] = self._leaf_decoder(field, leaf_blob)
         joined = {}
         joined.update(child_contents)
@@ -160,7 +159,7 @@ def get_instance_userdata(api_version='latest',
                                          timeout=timeout,
                                          retries=retries,
                                          exception_cb=exception_cb)
-        user_data = str(response)
+        user_data = response.contents
     except url_helper.UrlError as e:
         if e.code not in SKIP_USERDATA_CODES:
             util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
@@ -183,7 +182,7 @@ def get_instance_metadata(api_version='latest',
 
     try:
         response = caller(md_url)
-        materializer = MetadataMaterializer(str(response),
+        materializer = MetadataMaterializer(response.contents,
                                             md_url, caller,
                                             leaf_decoder=leaf_decoder)
         md = materializer.materialize()
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 059d7495..d67a70ea 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -147,7 +147,7 @@ def walker_handle_handler(pdata, _ctype, _filename, payload):
     if not modfname.endswith(".py"):
         modfname = "%s.py" % (modfname)
     # TODO(harlowja): Check if path exists??
-    util.write_file(modfname, payload, 0600)
+    util.write_file(modfname, payload, 0o600)
     handlers = pdata['handlers']
     try:
         mod = fixup_handler(importer.import_module(modname))
diff --git a/cloudinit/handlers/boot_hook.py b/cloudinit/handlers/boot_hook.py
index 3a50cf87..a4ea47ac 100644
--- a/cloudinit/handlers/boot_hook.py
+++ b/cloudinit/handlers/boot_hook.py
@@ -50,7 +50,7 @@ class BootHookPartHandler(handlers.Handler):
         filepath = os.path.join(self.boothook_dir, filename)
         contents = util.strip_prefix_suffix(util.dos2unix(payload),
                                             prefix=BOOTHOOK_PREFIX)
-        util.write_file(filepath, contents.lstrip(), 0700)
+        util.write_file(filepath, contents.lstrip(), 0o700)
         return filepath
 
     def handle_part(self, data, ctype, filename, payload, frequency):
diff --git a/cloudinit/handlers/cloud_config.py b/cloudinit/handlers/cloud_config.py
index bf994e33..07b6d0e0 100644
--- a/cloudinit/handlers/cloud_config.py
+++ b/cloudinit/handlers/cloud_config.py
@@ -95,7 +95,7 @@ class CloudConfigPartHandler(handlers.Handler):
             lines.append(util.yaml_dumps(self.cloud_buf))
         else:
             lines = []
-        util.write_file(self.cloud_fn, "\n".join(lines), 0600)
+        util.write_file(self.cloud_fn, "\n".join(lines), 0o600)
 
     def _extract_mergers(self, payload, headers):
         merge_header_headers = ''
diff --git a/cloudinit/handlers/shell_script.py b/cloudinit/handlers/shell_script.py
index 9755ab05..b5087693 100644
--- a/cloudinit/handlers/shell_script.py
+++ b/cloudinit/handlers/shell_script.py
@@ -52,4 +52,4 @@ class ShellScriptPartHandler(handlers.Handler):
         filename = util.clean_filename(filename)
         payload = util.dos2unix(payload)
         path = os.path.join(self.script_dir, filename)
-        util.write_file(path, payload, 0700)
+        util.write_file(path, payload, 0o700)
diff --git a/cloudinit/handlers/upstart_job.py b/cloudinit/handlers/upstart_job.py
index 50d193c4..c5bea711 100644
--- a/cloudinit/handlers/upstart_job.py
+++ b/cloudinit/handlers/upstart_job.py
@@ -65,7 +65,7 @@ class UpstartJobPartHandler(handlers.Handler):
 
         payload = util.dos2unix(payload)
         path = os.path.join(self.upstart_dir, filename)
-        util.write_file(path, payload, 0644)
+        util.write_file(path, payload, 0o644)
 
         if SUITABLE_UPSTART:
             util.subp(["initctl", "reload-configuration"], capture=False)
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index e701126e..ed396b5a 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -23,10 +23,11 @@
 from time import time
 
 import contextlib
-import io
 import os
 
-from ConfigParser import (NoSectionError, NoOptionError, RawConfigParser)
+import six
+from six.moves.configparser import (
+    NoSectionError, NoOptionError, RawConfigParser)
 
 from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
                                 CFG_ENV_NAME)
@@ -318,10 +319,10 @@ class ContentHandlers(object):
         return self.registered[content_type]
 
     def items(self):
-        return self.registered.items()
+        return list(self.registered.items())
 
-    def iteritems(self):
-        return self.registered.iteritems()
+    # XXX This should really go away.
+    iteritems = items
 
 
 class Paths(object):
@@ -449,7 +450,7 @@ class DefaultingConfigParser(RawConfigParser):
 
     def stringify(self, header=None):
         contents = ''
-        with io.BytesIO() as outputstream:
+        with six.StringIO() as outputstream:
             self.write(outputstream)
             outputstream.flush()
             contents = outputstream.getvalue()
diff --git a/cloudinit/log.py b/cloudinit/log.py
index 622c946c..3c79b9c9 100644
--- a/cloudinit/log.py
+++ b/cloudinit/log.py
@@ -28,7 +28,8 @@ import collections
 import os
 import sys
 
-from StringIO import StringIO
+import six
+from six import StringIO
 
 # Logging levels for easy access
 CRITICAL = logging.CRITICAL
@@ -72,13 +73,13 @@ def setupLogging(cfg=None):
 
     log_cfgs = []
     log_cfg = cfg.get('logcfg')
-    if log_cfg and isinstance(log_cfg, (str, basestring)):
+    if log_cfg and isinstance(log_cfg, six.string_types):
         # If there is a 'logcfg' entry in the config,
         # respect it, it is the old keyname
         log_cfgs.append(str(log_cfg))
     elif "log_cfgs" in cfg:
         for a_cfg in cfg['log_cfgs']:
-            if isinstance(a_cfg, (basestring, str)):
+            if isinstance(a_cfg, six.string_types):
                 log_cfgs.append(a_cfg)
             elif isinstance(a_cfg, (collections.Iterable)):
                 cfg_str = [str(c) for c in a_cfg]
diff --git a/cloudinit/mergers/__init__.py b/cloudinit/mergers/__init__.py
index 03aa1ee1..e13f55ac 100644
--- a/cloudinit/mergers/__init__.py
+++ b/cloudinit/mergers/__init__.py
@@ -18,6 +18,8 @@
 
 import re
 
+import six
+
 from cloudinit import importer
 from cloudinit import log as logging
 from cloudinit import type_utils
@@ -95,7 +97,7 @@ def dict_extract_mergers(config):
         raw_mergers = config.pop('merge_type', None)
     if raw_mergers is None:
         return parsed_mergers
-    if isinstance(raw_mergers, (str, basestring)):
+    if isinstance(raw_mergers, six.string_types):
         return string_extract_mergers(raw_mergers)
     for m in raw_mergers:
         if isinstance(m, (dict)):
diff --git a/cloudinit/mergers/m_dict.py b/cloudinit/mergers/m_dict.py
index a16141fa..87cf1a72 100644
--- a/cloudinit/mergers/m_dict.py
+++ b/cloudinit/mergers/m_dict.py
@@ -16,6 +16,8 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import six
+
 DEF_MERGE_TYPE = 'no_replace'
 MERGE_TYPES = ('replace', DEF_MERGE_TYPE,)
 
@@ -57,7 +59,7 @@ class Merger(object):
                 return new_v
             if isinstance(new_v, (list, tuple)) and self._recurse_array:
                 return self._merger.merge(old_v, new_v)
-            if isinstance(new_v, (basestring)) and self._recurse_str:
+            if isinstance(new_v, six.string_types) and self._recurse_str:
                 return self._merger.merge(old_v, new_v)
             if isinstance(new_v, (dict)) and self._recurse_dict:
                 return self._merger.merge(old_v, new_v)
diff --git a/cloudinit/mergers/m_list.py b/cloudinit/mergers/m_list.py
index 3b87b0fc..81e5c580 100644
--- a/cloudinit/mergers/m_list.py
+++ b/cloudinit/mergers/m_list.py
@@ -16,6 +16,8 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+import six
+
 DEF_MERGE_TYPE = 'replace'
 MERGE_TYPES = ('append', 'prepend', DEF_MERGE_TYPE, 'no_replace')
 
@@ -73,7 +75,7 @@ class Merger(object):
                 return old_v
             if isinstance(new_v, (list, tuple)) and self._recurse_array:
                 return self._merger.merge(old_v, new_v)
-            if isinstance(new_v, (str, basestring)) and self._recurse_str:
+            if isinstance(new_v, six.string_types) and self._recurse_str:
                 return self._merger.merge(old_v, new_v)
             if isinstance(new_v, (dict)) and self._recurse_dict:
                 return self._merger.merge(old_v, new_v)
@@ -82,6 +84,6 @@ class Merger(object):
         # Ok now we are replacing same indexes
         merged_list.extend(value)
         common_len = min(len(merged_list), len(merge_with))
-        for i in xrange(0, common_len):
+        for i in range(0, common_len):
             merged_list[i] = merge_same_index(merged_list[i], merge_with[i])
         return merged_list
diff --git a/cloudinit/mergers/m_str.py b/cloudinit/mergers/m_str.py
index e22ce28a..b00c4bf3 100644
--- a/cloudinit/mergers/m_str.py
+++ b/cloudinit/mergers/m_str.py
@@ -17,6 +17,8 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
+import six
+
 
 class Merger(object):
     def __init__(self, _merger, opts):
@@ -34,11 +36,11 @@ class Merger(object):
     # perform the following action, if appending we will
     # merge them together, otherwise we will just return value.
     def _on_str(self, value, merge_with):
-        if not isinstance(value, (basestring)):
+        if not isinstance(value, six.string_types):
             return merge_with
         if not self._append:
             return merge_with
-        if isinstance(value, unicode):
-            return value + unicode(merge_with)
+        if isinstance(value, six.text_type):
+            return value + six.text_type(merge_with)
         else:
-            return value + str(merge_with)
+            return value + six.binary_type(merge_with)
diff --git a/cloudinit/netinfo.py b/cloudinit/netinfo.py
index fb40cc0d..e30d6fb5 100644
--- a/cloudinit/netinfo.py
+++ b/cloudinit/netinfo.py
@@ -87,7 +87,7 @@ def netdev_info(empty=""):
                     devs[curdev][target] = toks[i][len(field) + 1:]
 
     if empty != "":
-        for (_devname, dev) in devs.iteritems():
+        for (_devname, dev) in devs.items():
             for field in dev:
                 if dev[field] == "":
                     dev[field] = empty
@@ -181,7 +181,7 @@ def netdev_pformat():
     else:
         fields = ['Device', 'Up', 'Address', 'Mask', 'Scope', 'Hw-Address']
         tbl = PrettyTable(fields)
-        for (dev, d) in netdev.iteritems():
+        for (dev, d) in netdev.items():
             tbl.add_row([dev, d["up"], d["addr"], d["mask"], ".", d["hwaddr"]])
             if d.get('addr6'):
                 tbl.add_row([dev, d["up"],
diff --git a/cloudinit/signal_handler.py b/cloudinit/signal_handler.py
index 40b0c94c..0d95f506 100644
--- a/cloudinit/signal_handler.py
+++ b/cloudinit/signal_handler.py
@@ -22,7 +22,7 @@ import inspect
 import signal
 import sys
 
-from StringIO import StringIO
+from six import StringIO
 
 from cloudinit import log as logging
 from cloudinit import util
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 15244a0d..eb474079 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -216,11 +216,11 @@ def on_first_boot(data, distro=None):
     files = data.get('files', {})
     if files:
         LOG.debug("Writing %s injected files", len(files))
-        for (filename, content) in files.iteritems():
+        for (filename, content) in files.items():
             if not filename.startswith(os.sep):
                 filename = os.sep + filename
             try:
-                util.write_file(filename, content, mode=0660)
+                util.write_file(filename, content, mode=0o660)
             except IOError:
                 util.logexc(LOG, "Failed writing file: %s", filename)
 
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 8f27ee89..b20ce2a1 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -18,7 +18,7 @@ from cloudinit import log as logging
 from cloudinit import util
 from cloudinit import sources
 from cloudinit import ec2_utils
-from types import StringType
+
 import functools
 
 
@@ -72,10 +72,11 @@ class DataSourceDigitalOcean(sources.DataSource):
         return "\n".join(self.metadata['vendor-data'])
 
     def get_public_ssh_keys(self):
-        if type(self.metadata['public-keys']) is StringType:
-            return [self.metadata['public-keys']]
+        public_keys = self.metadata['public-keys']
+        if isinstance(public_keys, list):
+            return public_keys
         else:
-            return self.metadata['public-keys']
+            return [public_keys]
 
     @property
     def availability_zone(self):
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 1b20ecf3..798869b7 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -156,8 +156,8 @@ class DataSourceEc2(sources.DataSource):
         # 'ephemeral0': '/dev/sdb',
         # 'root': '/dev/sda1'}
         found = None
-        bdm_items = self.metadata['block-device-mapping'].iteritems()
-        for (entname, device) in bdm_items:
+        bdm = self.metadata['block-device-mapping']
+        for (entname, device) in bdm.items():
             if entname == name:
                 found = device
                 break
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index dfe90bc6..9a3e30c5 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -262,7 +262,7 @@ def check_seed_contents(content, seed):
 
     userdata = content.get('user-data', "")
     md = {}
-    for (key, val) in content.iteritems():
+    for (key, val) in content.items():
         if key == 'user-data':
             continue
         md[key] = val
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 7ba60735..58a4b2a2 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -66,7 +66,7 @@ class DataSourceOVF(sources.DataSource):
             np = {'iso': transport_iso9660,
                   'vmware-guestd': transport_vmware_guestd, }
             name = None
-            for (name, transfunc) in np.iteritems():
+            for (name, transfunc) in np.items():
                 (contents, _dev, _fname) = transfunc()
                 if contents:
                     break
@@ -138,7 +138,7 @@ def read_ovf_environment(contents):
     ud = ""
     cfg_props = ['password']
     md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
-    for (prop, val) in props.iteritems():
+    for (prop, val) in props.items():
         if prop == 'hostname':
             prop = "local-hostname"
         if prop in md_props:
@@ -183,7 +183,7 @@ def transport_iso9660(require_iso=True):
 
     # Go through mounts to see if it was already mounted
     mounts = util.mounts()
-    for (dev, info) in mounts.iteritems():
+    for (dev, info) in mounts.items():
         fstype = info['fstype']
         if fstype != "iso9660" and require_iso:
             continue
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 2733a2f6..7a975d78 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -30,12 +30,12 @@
 #       Comments with "@datadictionary" are snippets of the definition
 
 import base64
+import os
+import serial
+
 from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import util
-import os
-import os.path
-import serial
 
 
 LOG = logging.getLogger(__name__)
@@ -201,7 +201,7 @@ class DataSourceSmartOS(sources.DataSource):
         if b64_all is not None:
             self.b64_all = util.is_true(b64_all)
 
-        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems():
+        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
             smartos_noun, strip = attribute
             md[ci_noun] = self.query(smartos_noun, strip=strip)
 
@@ -218,11 +218,12 @@ class DataSourceSmartOS(sources.DataSource):
         user_script = os.path.join(data_d, 'user-script')
         u_script_l = "%s/user-script" % LEGACY_USER_D
         write_boot_content(md.get('user-script'), content_f=user_script,
-                           link=u_script_l, shebang=True, mode=0700)
+                           link=u_script_l, shebang=True, mode=0o700)
 
         operator_script = os.path.join(data_d, 'operator-script')
         write_boot_content(md.get('operator-script'),
-                           content_f=operator_script, shebang=False, mode=0700)
+                           content_f=operator_script, shebang=False,
+                           mode=0o700)
 
         # @datadictionary:  This key has no defined format, but its value
         # is written to the file /var/db/mdata-user-data on each boot prior
@@ -381,7 +382,7 @@ def dmi_data():
 
 
 def write_boot_content(content, content_f, link=None, shebang=False,
-                       mode=0400):
+                       mode=0o400):
     """
     Write the content to content_f. Under the following rules:
         1. If no content, remove the file
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 7c7ef9ab..39eab51b 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -23,6 +23,8 @@
 import abc
 import os
 
+import six
+
 from cloudinit import importer
 from cloudinit import log as logging
 from cloudinit import type_utils
@@ -130,7 +132,7 @@ class DataSource(object):
         # we want to return the correct value for what will actually
         # exist in this instance
         mappings = {"sd": ("vd", "xvd", "vtb")}
-        for (nfrom, tlist) in mappings.iteritems():
+        for (nfrom, tlist) in mappings.items():
             if not short_name.startswith(nfrom):
                 continue
             for nto in tlist:
@@ -218,18 +220,18 @@ def normalize_pubkey_data(pubkey_data):
     if not pubkey_data:
         return keys
 
-    if isinstance(pubkey_data, (basestring, str)):
+    if isinstance(pubkey_data, six.string_types):
         return str(pubkey_data).splitlines()
 
     if isinstance(pubkey_data, (list, set)):
         return list(pubkey_data)
 
     if isinstance(pubkey_data, (dict)):
-        for (_keyname, klist) in pubkey_data.iteritems():
+        for (_keyname, klist) in pubkey_data.items():
             # lp:506332 uec metadata service responds with
             # data that makes boto populate a string for 'klist' rather
             # than a list.
-            if isinstance(klist, (str, basestring)):
+            if isinstance(klist, six.string_types):
                 klist = [klist]
             if isinstance(klist, (list, set)):
                 for pkey in klist:
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index b7e19314..88c7a198 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -24,6 +24,8 @@ import copy
 import functools
 import os
 
+import six
+
 from cloudinit import ec2_utils
 from cloudinit import log as logging
 from cloudinit import sources
@@ -205,7 +207,7 @@ class BaseReader(object):
         """
 
         load_json_anytype = functools.partial(
-            util.load_json, root_types=(dict, basestring, list))
+            util.load_json, root_types=(dict, list) + six.string_types)
 
         def datafiles(version):
             files = {}
@@ -234,7 +236,7 @@ class BaseReader(object):
             'version': 2,
         }
         data = datafiles(self._find_working_version())
-        for (name, (path, required, translator)) in data.iteritems():
+        for (name, (path, required, translator)) in data.items():
             path = self._path_join(self.base_path, path)
             data = None
             found = False
@@ -364,7 +366,7 @@ class ConfigDriveReader(BaseReader):
             raise NonReadable("%s: no files found" % (self.base_path))
 
         md = {}
-        for (name, (key, translator, default)) in FILES_V1.iteritems():
+        for (name, (key, translator, default)) in FILES_V1.items():
             if name in found:
                 path = found[name]
                 try:
@@ -478,7 +480,7 @@ def convert_vendordata_json(data, recurse=True):
     """
     if not data:
         return None
-    if isinstance(data, (str, unicode, basestring)):
+    if isinstance(data, six.string_types):
         return data
     if isinstance(data, list):
         return copy.deepcopy(data)
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 14d0cb0f..9b2f5ed5 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -239,7 +239,7 @@ def setup_user_keys(keys, username, options=None):
     # Make sure the users .ssh dir is setup accordingly
     (ssh_dir, pwent) = users_ssh_info(username)
     if not os.path.isdir(ssh_dir):
-        util.ensure_dir(ssh_dir, mode=0700)
+        util.ensure_dir(ssh_dir, mode=0o700)
         util.chownbyid(ssh_dir, pwent.pw_uid, pwent.pw_gid)
 
     # Turn the 'update' keys given into actual entries
@@ -252,8 +252,8 @@ def setup_user_keys(keys, username, options=None):
     (auth_key_fn, auth_key_entries) = extract_authorized_keys(username)
     with util.SeLinuxGuard(ssh_dir, recursive=True):
         content = update_authorized_keys(auth_key_entries, key_entries)
-        util.ensure_dir(os.path.dirname(auth_key_fn), mode=0700)
-        util.write_file(auth_key_fn, content, mode=0600)
+        util.ensure_dir(os.path.dirname(auth_key_fn), mode=0o700)
+        util.write_file(auth_key_fn, content, mode=0o600)
         util.chownbyid(auth_key_fn, pwent.pw_uid, pwent.pw_gid)
 
 
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 67f467f7..f4f4591d 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -20,12 +20,13 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import cPickle as pickle
-
 import copy
 import os
 import sys
 
+import six
+from six.moves import cPickle as pickle
+
 from cloudinit.settings import (PER_INSTANCE, FREQUENCIES, CLOUD_CONFIG)
 
 from cloudinit import handlers
@@ -202,7 +203,7 @@ class Init(object):
             util.logexc(LOG, "Failed pickling datasource %s", self.datasource)
             return False
         try:
-            util.write_file(pickled_fn, pk_contents, mode=0400)
+            util.write_file(pickled_fn, pk_contents, mode=0o400)
         except Exception:
             util.logexc(LOG, "Failed pickling datasource to %s", pickled_fn)
             return False
@@ -324,15 +325,15 @@ class Init(object):
 
     def _store_userdata(self):
         raw_ud = "%s" % (self.datasource.get_userdata_raw())
-        util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0600)
+        util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0o600)
         processed_ud = "%s" % (self.datasource.get_userdata())
-        util.write_file(self._get_ipath('userdata'), processed_ud, 0600)
+        util.write_file(self._get_ipath('userdata'), processed_ud, 0o600)
 
     def _store_vendordata(self):
         raw_vd = "%s" % (self.datasource.get_vendordata_raw())
-        util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0600)
+        util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0o600)
         processed_vd = "%s" % (self.datasource.get_vendordata())
-        util.write_file(self._get_ipath('vendordata'), processed_vd, 0600)
+        util.write_file(self._get_ipath('vendordata'), processed_vd, 0o600)
 
     def _default_handlers(self, opts=None):
         if opts is None:
@@ -384,7 +385,7 @@ class Init(object):
             if not path or not os.path.isdir(path):
                 return
             potential_handlers = util.find_modules(path)
-            for (fname, mod_name) in potential_handlers.iteritems():
+            for (fname, mod_name) in potential_handlers.items():
                 try:
                     mod_locs, looked_locs = importer.find_module(
                         mod_name, [''], ['list_types', 'handle_part'])
@@ -422,7 +423,7 @@ class Init(object):
 
         def init_handlers():
             # Init the handlers first
-            for (_ctype, mod) in c_handlers.iteritems():
+            for (_ctype, mod) in c_handlers.items():
                 if mod in c_handlers.initialized:
                     # Avoid initing the same module twice (if said module
                     # is registered to more than one content-type).
@@ -449,7 +450,7 @@ class Init(object):
 
         def finalize_handlers():
             # Give callbacks opportunity to finalize
-            for (_ctype, mod) in c_handlers.iteritems():
+            for (_ctype, mod) in c_handlers.items():
                 if mod not in c_handlers.initialized:
                     # Said module was never inited in the first place, so lets
                     # not attempt to finalize those that never got called.
@@ -574,7 +575,7 @@ class Modules(object):
         for item in cfg_mods:
             if not item:
                 continue
-            if isinstance(item, (str, basestring)):
+            if isinstance(item, six.string_types):
                 module_list.append({
                     'mod': item.strip(),
                 })
diff --git a/cloudinit/type_utils.py b/cloudinit/type_utils.py
index cc3d9495..b93efd6a 100644
--- a/cloudinit/type_utils.py
+++ b/cloudinit/type_utils.py
@@ -22,11 +22,31 @@
 
 import types
 
+import six
+
+
+if six.PY3:
+    _NAME_TYPES = (
+        types.ModuleType,
+        types.FunctionType,
+        types.LambdaType,
+        type,
+    )
+else:
+    _NAME_TYPES = (
+        types.TypeType,
+        types.ModuleType,
+        types.FunctionType,
+        types.LambdaType,
+        types.ClassType,
+    )
+
 
 def obj_name(obj):
-    if isinstance(obj, (types.TypeType,
-                        types.ModuleType,
-                        types.FunctionType,
-                        types.LambdaType)):
-        return str(obj.__name__)
-    return obj_name(obj.__class__)
+    if isinstance(obj, _NAME_TYPES):
+        return six.text_type(obj.__name__)
+    else:
+        if not hasattr(obj, '__class__'):
+            return repr(obj)
+        else:
+            return obj_name(obj.__class__)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 3074dd08..62001dff 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -20,21 +20,29 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import httplib
 import time
-import urllib
+
+import six
 
 import requests
 from requests import exceptions
 
-from urlparse import (urlparse, urlunparse)
+from six.moves.urllib.parse import (
+    urlparse, urlunparse,
+    quote as urlquote)
 
 from cloudinit import log as logging
 from cloudinit import version
 
 LOG = logging.getLogger(__name__)
 
-NOT_FOUND = httplib.NOT_FOUND
+if six.PY2:
+    import httplib
+    NOT_FOUND = httplib.NOT_FOUND
+else:
+    import http.client
+    NOT_FOUND = http.client.NOT_FOUND
+
 
 # Check if requests has ssl support (added in requests >= 0.8.8)
 SSL_ENABLED = False
@@ -70,7 +78,7 @@ def combine_url(base, *add_ons):
         path = url_parsed[2]
         if path and not path.endswith("/"):
             path += "/"
-        path += urllib.quote(str(add_on), safe="/:")
+        path += urlquote(str(add_on), safe="/:")
         url_parsed[2] = path
         return urlunparse(url_parsed)
 
@@ -111,7 +119,7 @@ class UrlResponse(object):
 
     @property
     def contents(self):
-        return self._response.content
+        return self._response.text
 
     @property
     def url(self):
@@ -135,7 +143,7 @@ class UrlResponse(object):
         return self._response.status_code
 
     def __str__(self):
-        return self.contents
+        return self._response.text
 
 
 class UrlError(IOError):
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index de6487d8..9111bd39 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -29,6 +29,8 @@ from email.mime.multipart import MIMEMultipart
 from email.mime.nonmultipart import MIMENonMultipart
 from email.mime.text import MIMEText
 
+import six
+
 from cloudinit import handlers
 from cloudinit import log as logging
 from cloudinit import util
@@ -235,7 +237,7 @@ class UserDataProcessor(object):
                 resp = util.read_file_or_url(include_url,
                                              ssl_details=self.ssl_details)
                 if include_once_on and resp.ok():
-                    util.write_file(include_once_fn, str(resp), mode=0600)
+                    util.write_file(include_once_fn, str(resp), mode=0o600)
                 if resp.ok():
                     content = str(resp)
                 else:
@@ -256,7 +258,7 @@ class UserDataProcessor(object):
             #    filename and type not be present
             # or
             #  scalar(payload)
-            if isinstance(ent, (str, basestring)):
+            if isinstance(ent, six.string_types):
                 ent = {'content': ent}
             if not isinstance(ent, (dict)):
                 # TODO(harlowja) raise?
@@ -337,7 +339,7 @@ def convert_string(raw_data, headers=None):
     data = util.decomp_gzip(raw_data)
     if "mime-version:" in data[0:4096].lower():
         msg = email.message_from_string(data)
-        for (key, val) in headers.iteritems():
+        for (key, val) in headers.items():
             _replace_header(msg, key, val)
     else:
         mtype = headers.get(CONTENT_TYPE, NOT_MULTIPART_TYPE)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 9efc704a..434ba7fb 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -20,8 +20,6 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from StringIO import StringIO
-
 import contextlib
 import copy as obj_copy
 import ctypes
@@ -45,8 +43,10 @@ import subprocess
 import sys
 import tempfile
 import time
-import urlparse
 
+from six.moves.urllib import parse as urlparse
+
+import six
 import yaml
 
 from cloudinit import importer
@@ -69,8 +69,26 @@ FN_REPLACEMENTS = {
 }
 FN_ALLOWED = ('_-.()' + string.digits + string.ascii_letters)
 
+TRUE_STRINGS = ('true', '1', 'on', 'yes')
+FALSE_STRINGS = ('off', '0', 'no', 'false')
+
+
 # Helper utils to see if running in a container
-CONTAINER_TESTS = ['running-in-container', 'lxc-is-container']
+CONTAINER_TESTS = ('running-in-container', 'lxc-is-container')
+
+
+def decode_binary(blob, encoding='utf-8'):
+    # Converts a binary type into a text type using given encoding.
+    if isinstance(blob, six.text_type):
+        return blob
+    return blob.decode(encoding)
+
+
+def encode_text(text, encoding='utf-8'):
+    # Converts a text string into a binary type using given encoding.
+    if isinstance(text, six.binary_type):
+        return text
+    return text.encode(encoding)
 
 
 class ProcessExecutionError(IOError):
@@ -95,7 +113,7 @@ class ProcessExecutionError(IOError):
         else:
             self.description = description
 
-        if not isinstance(exit_code, (long, int)):
+        if not isinstance(exit_code, six.integer_types):
             self.exit_code = '-'
         else:
             self.exit_code = exit_code
@@ -151,7 +169,8 @@ class SeLinuxGuard(object):
 
         path = os.path.realpath(self.path)
         # path should be a string, not unicode
-        path = str(path)
+        if six.PY2:
+            path = str(path)
         try:
             stats = os.lstat(path)
             self.selinux.matchpathcon(path, stats[stat.ST_MODE])
@@ -209,10 +228,10 @@ def fork_cb(child_cb, *args, **kwargs):
 def is_true(val, addons=None):
     if isinstance(val, (bool)):
         return val is True
-    check_set = ['true', '1', 'on', 'yes']
+    check_set = TRUE_STRINGS
     if addons:
-        check_set = check_set + addons
-    if str(val).lower().strip() in check_set:
+        check_set = list(check_set) + addons
+    if six.text_type(val).lower().strip() in check_set:
         return True
     return False
 
@@ -220,10 +239,10 @@ def is_true(val, addons=None):
 def is_false(val, addons=None):
     if isinstance(val, (bool)):
         return val is False
-    check_set = ['off', '0', 'no', 'false']
+    check_set = FALSE_STRINGS
     if addons:
-        check_set = check_set + addons
-    if str(val).lower().strip() in check_set:
+        check_set = list(check_set) + addons
+    if six.text_type(val).lower().strip() in check_set:
         return True
     return False
 
@@ -273,7 +292,7 @@ def uniq_merge_sorted(*lists):
 def uniq_merge(*lists):
     combined_list = []
     for a_list in lists:
-        if isinstance(a_list, (str, basestring)):
+        if isinstance(a_list, six.string_types):
             a_list = a_list.strip().split(",")
             # Kickout the empty ones
             a_list = [a for a in a_list if len(a)]
@@ -282,7 +301,7 @@ def uniq_merge(*lists):
 
 
 def clean_filename(fn):
-    for (k, v) in FN_REPLACEMENTS.iteritems():
+    for (k, v) in FN_REPLACEMENTS.items():
         fn = fn.replace(k, v)
     removals = []
     for k in fn:
@@ -296,14 +315,14 @@ def clean_filename(fn):
 
 def decomp_gzip(data, quiet=True):
     try:
-        buf = StringIO(str(data))
+        buf = six.BytesIO(encode_text(data))
         with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
-            return gh.read()
+            return decode_binary(gh.read())
     except Exception as e:
         if quiet:
             return data
         else:
-            raise DecompressionError(str(e))
+            raise DecompressionError(six.text_type(e))
 
 
 def extract_usergroup(ug_pair):
@@ -362,7 +381,7 @@ def multi_log(text, console=True, stderr=True,
 
 
 def load_json(text, root_types=(dict,)):
-    decoded = json.loads(text)
+    decoded = json.loads(decode_binary(text))
     if not isinstance(decoded, tuple(root_types)):
         expected_types = ", ".join([str(t) for t in root_types])
         raise TypeError("(%s) root types expected, got %s instead"
@@ -394,7 +413,7 @@ def get_cfg_option_str(yobj, key, default=None):
     if key not in yobj:
         return default
     val = yobj[key]
-    if not isinstance(val, (str, basestring)):
+    if not isinstance(val, six.string_types):
         val = str(val)
     return val
 
@@ -433,7 +452,7 @@ def get_cfg_option_list(yobj, key, default=None):
     if isinstance(val, (list)):
         cval = [v for v in val]
         return cval
-    if not isinstance(val, (basestring)):
+    if not isinstance(val, six.string_types):
         val = str(val)
     return [val]
 
@@ -708,10 +727,10 @@ def read_file_or_url(url, timeout=5, retries=10,
 
 def load_yaml(blob, default=None, allowed=(dict,)):
     loaded = default
+    blob = decode_binary(blob)
     try:
-        blob = str(blob)
-        LOG.debug(("Attempting to load yaml from string "
-                 "of length %s with allowed root types %s"),
+        LOG.debug("Attempting to load yaml from string "
+                 "of length %s with allowed root types %s",
                  len(blob), allowed)
         converted = safeyaml.load(blob)
         if not isinstance(converted, allowed):
@@ -746,14 +765,12 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
     md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
     md = None
     if md_resp.ok():
-        md_str = str(md_resp)
-        md = load_yaml(md_str, default={})
+        md = load_yaml(md_resp.contents, default={})
 
     ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
     ud = None
     if ud_resp.ok():
-        ud_str = str(ud_resp)
-        ud = ud_str
+        ud = ud_resp.contents
 
     return (md, ud)
 
@@ -784,7 +801,7 @@ def read_conf_with_confd(cfgfile):
     if "conf_d" in cfg:
         confd = cfg['conf_d']
         if confd:
-            if not isinstance(confd, (str, basestring)):
+            if not isinstance(confd, six.string_types):
                 raise TypeError(("Config file %s contains 'conf_d' "
                                  "with non-string type %s") %
                                  (cfgfile, type_utils.obj_name(confd)))
@@ -921,8 +938,8 @@ def get_cmdline_url(names=('cloud-config-url', 'url'),
         return (None, None, None)
 
     resp = read_file_or_url(url)
-    if resp.contents.startswith(starts) and resp.ok():
-        return (key, url, str(resp))
+    if resp.ok() and resp.contents.startswith(starts):
+        return (key, url, resp.contents)
 
     return (key, url, None)
 
@@ -1076,9 +1093,9 @@ def uniq_list(in_list):
     return out_list
 
 
-def load_file(fname, read_cb=None, quiet=False):
+def load_file(fname, read_cb=None, quiet=False, decode=True):
     LOG.debug("Reading from %s (quiet=%s)", fname, quiet)
-    ofh = StringIO()
+    ofh = six.BytesIO()
     try:
         with open(fname, 'rb') as ifh:
             pipe_in_out(ifh, ofh, chunk_cb=read_cb)
@@ -1089,7 +1106,10 @@ def load_file(fname, read_cb=None, quiet=False):
             raise
     contents = ofh.getvalue()
     LOG.debug("Read %s bytes from %s", len(contents), fname)
-    return contents
+    if decode:
+        return decode_binary(contents)
+    else:
+        return contents
 
 
 def get_cmdline():
@@ -1219,7 +1239,7 @@ def logexc(log, msg, *args):
 
 def hash_blob(blob, routine, mlen=None):
     hasher = hashlib.new(routine)
-    hasher.update(blob)
+    hasher.update(encode_text(blob))
     digest = hasher.hexdigest()
     # Don't get to long now
     if mlen is not None:
@@ -1280,8 +1300,7 @@ def yaml_dumps(obj, explicit_start=True, explicit_end=True):
                           indent=4,
                           explicit_start=explicit_start,
                           explicit_end=explicit_end,
-                          default_flow_style=False,
-                          allow_unicode=True)
+                          default_flow_style=False)
 
 
 def ensure_dir(path, mode=None):
@@ -1515,11 +1534,17 @@ def write_file(filename, content, mode=0o644, omode="wb"):
     @param filename: The full path of the file to write.
     @param content: The content to write to the file.
     @param mode: The filesystem mode to set on the file.
-    @param omode: The open mode used when opening the file (r, rb, a, etc.)
+    @param omode: The open mode used when opening the file (w, wb, a, etc.)
     """
     ensure_dir(os.path.dirname(filename))
-    LOG.debug("Writing to %s - %s: [%s] %s bytes",
-               filename, omode, mode, len(content))
+    if 'b' in omode.lower():
+        content = encode_text(content)
+        write_type = 'bytes'
+    else:
+        content = decode_binary(content)
+        write_type = 'characters'
+    LOG.debug("Writing to %s - %s: [%s] %s %s",
+               filename, omode, mode, len(content), write_type)
     with SeLinuxGuard(path=filename):
         with open(filename, omode) as fh:
             fh.write(content)
@@ -1608,10 +1633,10 @@ def shellify(cmdlist, add_header=True):
         if isinstance(args, list):
             fixed = []
             for f in args:
-                fixed.append("'%s'" % (str(f).replace("'", escaped)))
+                fixed.append("'%s'" % (six.text_type(f).replace("'", escaped)))
             content = "%s%s\n" % (content, ' '.join(fixed))
             cmds_made += 1
-        elif isinstance(args, (str, basestring)):
+        elif isinstance(args, six.string_types):
             content = "%s%s\n" % (content, args)
             cmds_made += 1
         else:
@@ -1722,7 +1747,7 @@ def expand_package_list(version_fmt, pkgs):
 
     pkglist = []
     for pkg in pkgs:
-        if isinstance(pkg, basestring):
+        if isinstance(pkg, six.string_types):
             pkglist.append(pkg)
             continue
 
diff --git a/packages/bddeb b/packages/bddeb
index 9d264f92..83ca68bb 100755
--- a/packages/bddeb
+++ b/packages/bddeb
@@ -38,6 +38,7 @@ PKG_MP = {
     'pyserial': 'python-serial',
     'pyyaml': 'python-yaml',
     'requests': 'python-requests',
+    'six': 'python-six',
 }
 DEBUILD_ARGS = ["-S", "-d"]
 
diff --git a/packages/brpm b/packages/brpm
index 9657b1dd..72bfca08 100755
--- a/packages/brpm
+++ b/packages/brpm
@@ -45,6 +45,7 @@ PKG_MP = {
         'pyserial': 'pyserial',
         'pyyaml': 'PyYAML',
         'requests': 'python-requests',
+        'six': 'python-six',
     },
     'suse': {
         'argparse': 'python-argparse',
@@ -56,6 +57,7 @@ PKG_MP = {
         'pyserial': 'python-pyserial',
         'pyyaml': 'python-yaml',
         'requests': 'python-requests',
+        'six': 'python-six',
     }
 }
 
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 03296e62..a35afc27 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -1,11 +1,11 @@
 """Tests for handling of userdata within cloud init."""
 
-import StringIO
-
 import gzip
 import logging
 import os
 
+from six import BytesIO, StringIO
+
 from email.mime.application import MIMEApplication
 from email.mime.base import MIMEBase
 from email.mime.multipart import MIMEMultipart
@@ -53,7 +53,7 @@ class TestConsumeUserData(helpers.FilesystemMockingTestCase):
         self.patchUtils(root)
 
     def capture_log(self, lvl=logging.DEBUG):
-        log_file = StringIO.StringIO()
+        log_file = StringIO()
         self._log_handler = logging.StreamHandler(log_file)
         self._log_handler.setLevel(lvl)
         self._log = log.getLogger()
@@ -351,9 +351,9 @@ p: 1
         """Tests that individual message gzip encoding works."""
 
         def gzip_part(text):
-            contents = StringIO.StringIO()
-            f = gzip.GzipFile(fileobj=contents, mode='w')
-            f.write(str(text))
+            contents = BytesIO()
+            f = gzip.GzipFile(fileobj=contents, mode='wb')
+            f.write(util.encode_text(text))
             f.flush()
             f.close()
             return MIMEApplication(contents.getvalue(), 'gzip')
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index e9235951..ae9e6c22 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -85,7 +85,7 @@ class TestNoCloudDataSource(MockerTestCase):
 
         data = {
             'fs_label': None,
-            'meta-data': {'instance-id': 'IID'},
+            'meta-data': yaml.safe_dump({'instance-id': 'IID'}),
             'user-data': "USER_DATA_RAW",
         }
 
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index 49894e51..81ef1546 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -20,12 +20,11 @@ import copy
 import json
 import re
 
-from StringIO import StringIO
-
-from urlparse import urlparse
-
 from .. import helpers as test_helpers
 
+from six import StringIO
+from six.moves.urllib.parse import urlparse
+
 from cloudinit import helpers
 from cloudinit import settings
 from cloudinit.sources import DataSourceOpenStack as ds
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index 33a1d6e1..6e1a0b69 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -4,6 +4,8 @@ import mocker
 
 import os
 
+from six import StringIO
+
 from cloudinit import distros
 from cloudinit import helpers
 from cloudinit import settings
@@ -11,8 +13,6 @@ from cloudinit import util
 
 from cloudinit.distros.parsers.sys_conf import SysConf
 
-from StringIO import StringIO
-
 
 BASE_NET_CFG = '''
 auto lo
diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py
index 203dd2aa..f5832365 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure.py
@@ -16,12 +16,12 @@ class TestAptProxyConfig(MockerTestCase):
         self.cfile = os.path.join(self.tmp, "config.cfg")
 
     def _search_apt_config(self, contents, ptype, value):
-        print(
+        ## print(
+        ##     r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value),
+        ##     contents, "flags=re.IGNORECASE")
+        return re.search(
             r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value),
-            contents, "flags=re.IGNORECASE")
-        return(re.search(
-            r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value),
-            contents, flags=re.IGNORECASE))
+            contents, flags=re.IGNORECASE)
 
     def test_apt_proxy_written(self):
         cfg = {'apt_proxy': 'myproxy'}
diff --git a/tests/unittests/test_handler/test_handler_locale.py b/tests/unittests/test_handler/test_handler_locale.py
index eb251636..690ef86f 100644
--- a/tests/unittests/test_handler/test_handler_locale.py
+++ b/tests/unittests/test_handler/test_handler_locale.py
@@ -29,7 +29,7 @@ from .. import helpers as t_help
 
 from configobj import ConfigObj
 
-from StringIO import StringIO
+from six import BytesIO
 
 import logging
 
@@ -59,6 +59,6 @@ class TestLocale(t_help.FilesystemMockingTestCase):
         cc = self._get_cloud('sles')
         cc_locale.handle('cc_locale', cfg, cc, LOG, [])
 
-        contents = util.load_file('/etc/sysconfig/language')
-        n_cfg = ConfigObj(StringIO(contents))
+        contents = util.load_file('/etc/sysconfig/language', decode=False)
+        n_cfg = ConfigObj(BytesIO(contents))
         self.assertEquals({'RC_LANG': cfg['locale']}, dict(n_cfg))
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
index 40481f16..579377fb 100644
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ b/tests/unittests/test_handler/test_handler_seed_random.py
@@ -22,7 +22,7 @@ import base64
 import gzip
 import tempfile
 
-from StringIO import StringIO
+from six import StringIO
 
 from cloudinit import cloud
 from cloudinit import distros
diff --git a/tests/unittests/test_handler/test_handler_set_hostname.py b/tests/unittests/test_handler/test_handler_set_hostname.py
index e1530e30..a9f7829b 100644
--- a/tests/unittests/test_handler/test_handler_set_hostname.py
+++ b/tests/unittests/test_handler/test_handler_set_hostname.py
@@ -9,7 +9,7 @@ from .. import helpers as t_help
 
 import logging
 
-from StringIO import StringIO
+from six import BytesIO
 
 from configobj import ConfigObj
 
@@ -38,8 +38,8 @@ class TestHostname(t_help.FilesystemMockingTestCase):
         cc_set_hostname.handle('cc_set_hostname',
                                cfg, cc, LOG, [])
         if not distro.uses_systemd():
-            contents = util.load_file("/etc/sysconfig/network")
-            n_cfg = ConfigObj(StringIO(contents))
+            contents = util.load_file("/etc/sysconfig/network", decode=False)
+            n_cfg = ConfigObj(BytesIO(contents))
             self.assertEquals({'HOSTNAME': 'blah.blah.blah.yahoo.com'},
                               dict(n_cfg))
 
diff --git a/tests/unittests/test_handler/test_handler_timezone.py b/tests/unittests/test_handler/test_handler_timezone.py
index 874db340..10ea2040 100644
--- a/tests/unittests/test_handler/test_handler_timezone.py
+++ b/tests/unittests/test_handler/test_handler_timezone.py
@@ -29,7 +29,7 @@ from .. import helpers as t_help
 
 from configobj import ConfigObj
 
-from StringIO import StringIO
+from six import BytesIO
 
 import logging
 
@@ -67,8 +67,8 @@ class TestTimezone(t_help.FilesystemMockingTestCase):
 
         cc_timezone.handle('cc_timezone', cfg, cc, LOG, [])
 
-        contents = util.load_file('/etc/sysconfig/clock')
-        n_cfg = ConfigObj(StringIO(contents))
+        contents = util.load_file('/etc/sysconfig/clock', decode=False)
+        n_cfg = ConfigObj(BytesIO(contents))
         self.assertEquals({'TIMEZONE': cfg['timezone']}, dict(n_cfg))
 
         contents = util.load_file('/etc/localtime')
diff --git a/tests/unittests/test_handler/test_handler_yum_add_repo.py b/tests/unittests/test_handler/test_handler_yum_add_repo.py
index 435c9787..81806ad1 100644
--- a/tests/unittests/test_handler/test_handler_yum_add_repo.py
+++ b/tests/unittests/test_handler/test_handler_yum_add_repo.py
@@ -6,7 +6,7 @@ from .. import helpers
 
 import logging
 
-from StringIO import StringIO
+from six import BytesIO
 
 import configobj
 
@@ -52,8 +52,9 @@ class TestConfig(helpers.FilesystemMockingTestCase):
         }
         self.patchUtils(self.tmp)
         cc_yum_add_repo.handle('yum_add_repo', cfg, None, LOG, [])
-        contents = util.load_file("/etc/yum.repos.d/epel_testing.repo")
-        contents = configobj.ConfigObj(StringIO(contents))
+        contents = util.load_file("/etc/yum.repos.d/epel_testing.repo",
+                                  decode=False)
+        contents = configobj.ConfigObj(BytesIO(contents))
         expected = {
             'epel_testing': {
                 'name': 'Extra Packages for Enterprise Linux 5 - Testing',
-- 
cgit v1.2.3


From 3b798b5d5c3caa5d0e8e534855e29010ca932aaa Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Thu, 22 Jan 2015 21:21:04 -0500
Subject: Low hanging Python 3 fruit.

---
 cloudinit/config/cc_ca_certs.py                          |  4 ++--
 cloudinit/config/cc_chef.py                              |  6 ++++--
 cloudinit/distros/__init__.py                            | 12 ++++++++++--
 cloudinit/distros/debian.py                              |  2 +-
 cloudinit/distros/rhel_util.py                           |  4 ++--
 cloudinit/distros/sles.py                                |  2 +-
 cloudinit/sources/DataSourceAltCloud.py                  | 12 ++++++------
 cloudinit/sources/DataSourceAzure.py                     |  4 ++--
 cloudinit/sources/DataSourceMAAS.py                      | 10 ++++++----
 cloudinit/sources/DataSourceOpenNebula.py                |  2 +-
 cloudinit/templater.py                                   |  2 +-
 cloudinit/util.py                                        |  7 +++++--
 templates/resolv.conf.tmpl                               |  2 +-
 tests/unittests/helpers.py                               |  4 ++--
 tests/unittests/test_datasource/test_configdrive.py      |  2 +-
 tests/unittests/test_datasource/test_digitalocean.py     |  7 +++----
 tests/unittests/test_datasource/test_gce.py              |  2 +-
 tests/unittests/test_datasource/test_opennebula.py       |  2 +-
 tests/unittests/test_datasource/test_smartos.py          |  4 +++-
 .../unittests/test_handler/test_handler_apt_configure.py |  2 +-
 tests/unittests/test_merging.py                          | 16 +++++++++-------
 tools/ccfg-merge-debug                                   |  4 ++--
 22 files changed, 65 insertions(+), 47 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_ca_certs.py b/cloudinit/config/cc_ca_certs.py
index 4f2a46a1..8248b020 100644
--- a/cloudinit/config/cc_ca_certs.py
+++ b/cloudinit/config/cc_ca_certs.py
@@ -44,7 +44,7 @@ def add_ca_certs(certs):
     if certs:
         # First ensure they are strings...
         cert_file_contents = "\n".join([str(c) for c in certs])
-        util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0644)
+        util.write_file(CA_CERT_FULL_PATH, cert_file_contents, mode=0o644)
 
         # Append cert filename to CA_CERT_CONFIG file.
         # We have to strip the content because blank lines in the file
@@ -63,7 +63,7 @@ def remove_default_ca_certs():
     """
     util.delete_dir_contents(CA_CERT_PATH)
     util.delete_dir_contents(CA_CERT_SYSTEM_PATH)
-    util.write_file(CA_CERT_CONFIG, "", mode=0644)
+    util.write_file(CA_CERT_CONFIG, "", mode=0o644)
     debconf_sel = "ca-certificates ca-certificates/trust_new_crts select no"
     util.subp(('debconf-set-selections', '-'), debconf_sel)
 
diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index fc837363..584199e5 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -76,6 +76,8 @@ from cloudinit import templater
 from cloudinit import url_helper
 from cloudinit import util
 
+import six
+
 RUBY_VERSION_DEFAULT = "1.8"
 
 CHEF_DIRS = tuple([
@@ -261,7 +263,7 @@ def run_chef(chef_cfg, log):
         cmd_args = chef_cfg['exec_arguments']
         if isinstance(cmd_args, (list, tuple)):
             cmd.extend(cmd_args)
-        elif isinstance(cmd_args, (str, basestring)):
+        elif isinstance(cmd_args, six.string_types):
             cmd.append(cmd_args)
         else:
             log.warn("Unknown type %s provided for chef"
@@ -300,7 +302,7 @@ def install_chef(cloud, chef_cfg, log):
         with util.tempdir() as tmpd:
             # Use tmpdir over tmpfile to avoid 'text file busy' on execute
             tmpf = "%s/chef-omnibus-install" % tmpd
-            util.write_file(tmpf, str(content), mode=0700)
+            util.write_file(tmpf, str(content), mode=0o700)
             util.subp([tmpf], capture=False)
     else:
         log.warn("Unknown chef install type '%s'", install_type)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 4ebccdda..6b96d58c 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -25,7 +25,6 @@ import six
 from six import StringIO
 
 import abc
-import itertools
 import os
 import re
 
@@ -37,6 +36,15 @@ from cloudinit import util
 
 from cloudinit.distros.parsers import hosts
 
+try:
+    # Python 2 and 3: six.moves.filter is itertools.ifilter on 2, builtin filter on 3
+    from six.moves import filter
+except ImportError:
+    # Python 2 fallback without six.moves
+    from itertools import ifilter as filter
+
+
+
 OSFAMILIES = {
     'debian': ['debian', 'ubuntu'],
     'redhat': ['fedora', 'rhel'],
@@ -853,7 +861,7 @@ def extract_default(users, default_name=None, default_config=None):
             return config['default']
 
     tmp_users = users.items()
-    tmp_users = dict(itertools.ifilter(safe_find, tmp_users))
+    tmp_users = dict(filter(safe_find, tmp_users))
     if not tmp_users:
         return (default_name, default_config)
     else:
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index b09eb094..6d3a82bf 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -97,7 +97,7 @@ class Distro(distros.Distro):
         if not conf:
             conf = HostnameConf('')
         conf.set_hostname(your_hostname)
-        util.write_file(out_fn, str(conf), 0644)
+        util.write_file(out_fn, str(conf), 0o644)
 
     def _read_system_hostname(self):
         sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py
index 063d536e..903d7793 100644
--- a/cloudinit/distros/rhel_util.py
+++ b/cloudinit/distros/rhel_util.py
@@ -50,7 +50,7 @@ def update_sysconfig_file(fn, adjustments, allow_empty=False):
         ]
         if not exists:
             lines.insert(0, util.make_header())
-        util.write_file(fn, "\n".join(lines) + "\n", 0644)
+        util.write_file(fn, "\n".join(lines) + "\n", 0o644)
 
 
 # Helper function to read a RHEL/SUSE /etc/sysconfig/* file
@@ -86,4 +86,4 @@ def update_resolve_conf_file(fn, dns_servers, search_servers):
                 r_conf.add_search_domain(s)
             except ValueError:
                 util.logexc(LOG, "Failed at adding search domain %s", s)
-    util.write_file(fn, str(r_conf), 0644)
+    util.write_file(fn, str(r_conf), 0o644)
diff --git a/cloudinit/distros/sles.py b/cloudinit/distros/sles.py
index 0c6d1203..620c974c 100644
--- a/cloudinit/distros/sles.py
+++ b/cloudinit/distros/sles.py
@@ -113,7 +113,7 @@ class Distro(distros.Distro):
         if not conf:
             conf = HostnameConf('')
         conf.set_hostname(hostname)
-        util.write_file(out_fn, str(conf), 0644)
+        util.write_file(out_fn, str(conf), 0o644)
 
     def _read_system_hostname(self):
         host_fn = self.hostname_conf_fn
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 1e913a6e..69053d0b 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -124,11 +124,11 @@ class DataSourceAltCloud(sources.DataSource):
         cmd = CMD_DMI_SYSTEM
         try:
             (cmd_out, _err) = util.subp(cmd)
-        except ProcessExecutionError, _err:
+        except ProcessExecutionError as _err:
             LOG.debug(('Failed command: %s\n%s') % \
                 (' '.join(cmd), _err.message))
             return 'UNKNOWN'
-        except OSError, _err:
+        except OSError as _err:
             LOG.debug(('Failed command: %s\n%s') % \
                 (' '.join(cmd), _err.message))
             return 'UNKNOWN'
@@ -211,11 +211,11 @@ class DataSourceAltCloud(sources.DataSource):
             cmd = CMD_PROBE_FLOPPY
             (cmd_out, _err) = util.subp(cmd)
             LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
-        except ProcessExecutionError, _err:
+        except ProcessExecutionError as _err:
             util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
                         _err.message)
             return False
-        except OSError, _err:
+        except OSError as _err:
             util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
                         _err.message)
             return False
@@ -228,11 +228,11 @@ class DataSourceAltCloud(sources.DataSource):
             cmd.append('--exit-if-exists=' + floppy_dev)
             (cmd_out, _err) = util.subp(cmd)
             LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
-        except ProcessExecutionError, _err:
+        except ProcessExecutionError as _err:
             util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
                         _err.message)
             return False
-        except OSError, _err:
+        except OSError as _err:
             util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
                         _err.message)
             return False
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 09bc196d..29ae2c22 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -151,7 +151,7 @@ class DataSourceAzureNet(sources.DataSource):
 
         # walinux agent writes files world readable, but expects
         # the directory to be protected.
-        write_files(ddir, files, dirmode=0700)
+        write_files(ddir, files, dirmode=0o700)
 
         # handle the hostname 'publishing'
         try:
@@ -390,7 +390,7 @@ def write_files(datadir, files, dirmode=None):
     util.ensure_dir(datadir, dirmode)
     for (name, content) in files.items():
         util.write_file(filename=os.path.join(datadir, name),
-                        content=content, mode=0600)
+                        content=content, mode=0o600)
 
 
 def invoke_agent(cmd):
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 9a3e30c5..8f9c81de 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -18,6 +18,8 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+from __future__ import print_function
+
 from email.utils import parsedate
 import errno
 import oauth.oauth as oauth
@@ -361,7 +363,7 @@ if __name__ == "__main__":
             return (urllib2.urlopen(req).read())
 
         def printurl(url, headers_cb):
-            print "== %s ==\n%s\n" % (url, geturl(url, headers_cb))
+            print("== %s ==\n%s\n" % (url, geturl(url, headers_cb)))
 
         def crawl(url, headers_cb=None):
             if url.endswith("/"):
@@ -386,9 +388,9 @@ if __name__ == "__main__":
                                                           version=args.apiver)
             else:
                 (userdata, metadata) = read_maas_seed_url(args.url)
-            print "=== userdata ==="
-            print userdata
-            print "=== metadata ==="
+            print("=== userdata ===")
+            print(userdata)
+            print("=== metadata ===")
             pprint.pprint(metadata)
 
         elif args.subcmd == "get":
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index e2469f6e..f9dac29e 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -280,7 +280,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
 
     # allvars expands to all existing variables by using '${!x*}' notation
     # where x is lower or upper case letters or '_'
-    allvars = ["${!%s*}" % x for x in string.letters + "_"]
+    allvars = ["${!%s*}" % x for x in string.ascii_letters + "_"]
 
     keylist_in = keylist
     if keylist is None:
diff --git a/cloudinit/templater.py b/cloudinit/templater.py
index 4cd3f13d..a9231482 100644
--- a/cloudinit/templater.py
+++ b/cloudinit/templater.py
@@ -137,7 +137,7 @@ def render_from_file(fn, params):
     return renderer(content, params)
 
 
-def render_to_file(fn, outfn, params, mode=0644):
+def render_to_file(fn, outfn, params, mode=0o644):
     contents = render_from_file(fn, params)
     util.write_file(outfn, contents, mode=mode)
 
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 434ba7fb..94fd5c70 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -142,6 +142,9 @@ class ProcessExecutionError(IOError):
             'reason': self.reason,
         }
         IOError.__init__(self, message)
+        # For backward compatibility with Python 2.
+        if not hasattr(self, 'message'):
+            self.message = message
 
 
 class SeLinuxGuard(object):
@@ -260,7 +263,7 @@ def translate_bool(val, addons=None):
 
 def rand_str(strlen=32, select_from=None):
     if not select_from:
-        select_from = string.letters + string.digits
+        select_from = string.ascii_letters + string.digits
     return "".join([random.choice(select_from) for _x in range(0, strlen)])
 
 
@@ -1127,7 +1130,7 @@ def pipe_in_out(in_fh, out_fh, chunk_size=1024, chunk_cb=None):
     bytes_piped = 0
     while True:
         data = in_fh.read(chunk_size)
-        if data == '':
+        if len(data) == 0:
             break
         else:
             out_fh.write(data)
diff --git a/templates/resolv.conf.tmpl b/templates/resolv.conf.tmpl
index 1300156c..bfae80db 100644
--- a/templates/resolv.conf.tmpl
+++ b/templates/resolv.conf.tmpl
@@ -24,7 +24,7 @@ sortlist {% for sort in sortlist %}{{sort}} {% endfor %}
 {% if options or flags %}
 
 options {% for flag in flags %}{{flag}} {% endfor %}
-{% for key, value in options.iteritems() -%}
+{% for key, value in options.items() -%}
  {{key}}:{{value}}
 {% endfor %}
 {% endif %}
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 38a2176d..70b8116f 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -65,7 +65,7 @@ if PY26:
         def assertDictContainsSubset(self, expected, actual, msg=None):
             missing = []
             mismatched = []
-            for k, v in expected.iteritems():
+            for k, v in expected.items():
                 if k not in actual:
                     missing.append(k)
                 elif actual[k] != v:
@@ -243,7 +243,7 @@ class HttprettyTestCase(TestCase):
 def populate_dir(path, files):
     if not os.path.exists(path):
         os.makedirs(path)
-    for (name, content) in files.iteritems():
+    for (name, content) in files.items():
         with open(os.path.join(path, name), "w") as fp:
             fp.write(content)
             fp.close()
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 800c5fd8..258c68e2 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -338,7 +338,7 @@ def populate_ds_from_read_config(cfg_ds, source, results):
 
 
 def populate_dir(seed_dir, files):
-    for (name, content) in files.iteritems():
+    for (name, content) in files.items():
         path = os.path.join(seed_dir, name)
         dirname = os.path.dirname(path)
         if not os.path.isdir(dirname):
diff --git a/tests/unittests/test_datasource/test_digitalocean.py b/tests/unittests/test_datasource/test_digitalocean.py
index d1270fc2..98f9cfac 100644
--- a/tests/unittests/test_datasource/test_digitalocean.py
+++ b/tests/unittests/test_datasource/test_digitalocean.py
@@ -18,8 +18,7 @@
 import httpretty
 import re
 
-from types import ListType
-from urlparse import urlparse
+from six.moves.urllib_parse import urlparse
 
 from cloudinit import settings
 from cloudinit import helpers
@@ -110,7 +109,7 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase):
         self.assertEqual([DO_META.get('public-keys')],
                          self.ds.get_public_ssh_keys())
 
-        self.assertIs(type(self.ds.get_public_ssh_keys()), ListType)
+        self.assertIsInstance(self.ds.get_public_ssh_keys(), list)
 
     @httpretty.activate
     def test_multiple_ssh_keys(self):
@@ -124,4 +123,4 @@ class TestDataSourceDigitalOcean(test_helpers.HttprettyTestCase):
         self.assertEqual(DO_META.get('public-keys').splitlines(),
                          self.ds.get_public_ssh_keys())
 
-        self.assertIs(type(self.ds.get_public_ssh_keys()), ListType)
+        self.assertIsInstance(self.ds.get_public_ssh_keys(), list)
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 06050bb1..aa60eb33 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -19,7 +19,7 @@ import httpretty
 import re
 
 from base64 import b64encode, b64decode
-from urlparse import urlparse
+from six.moves.urllib_parse import urlparse
 
 from cloudinit import settings
 from cloudinit import helpers
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index ddf77265..b79237f0 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -294,7 +294,7 @@ class TestParseShellConfig(unittest.TestCase):
 
 def populate_context_dir(path, variables):
     data = "# Context variables generated by OpenNebula\n"
-    for (k, v) in variables.iteritems():
+    for (k, v) in variables.items():
         data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''")))
     populate_dir(path, {'context.sh': data})
 
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 35d7ef5e..01b9b73e 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -22,6 +22,8 @@
 #   return responses.
 #
 
+from __future__ import print_function
+
 import base64
 from cloudinit import helpers as c_helpers
 from cloudinit.sources import DataSourceSmartOS
@@ -369,7 +371,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
                 permissions = oct(os.stat(name_f)[stat.ST_MODE])[-3:]
                 if re.match(r'.*\/mdata-user-data$', name_f):
                     found_new = True
-                    print name_f
+                    print(name_f)
                     self.assertEquals(permissions, '400')
 
         self.assertFalse(found_new)
diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py
index 2c3dad72..d72fa8c7 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure.py
@@ -62,7 +62,7 @@ class TestAptProxyConfig(unittest.TestCase):
 
         contents = str(util.read_file_or_url(self.pfile))
 
-        for ptype, pval in values.iteritems():
+        for ptype, pval in values.items():
             self.assertTrue(self._search_apt_config(contents, ptype, pval))
 
     def test_proxy_deleted(self):
diff --git a/tests/unittests/test_merging.py b/tests/unittests/test_merging.py
index 07b610f7..976d8283 100644
--- a/tests/unittests/test_merging.py
+++ b/tests/unittests/test_merging.py
@@ -11,11 +11,13 @@ import glob
 import os
 import random
 import re
+import six
 import string
 
 SOURCE_PAT = "source*.*yaml"
 EXPECTED_PAT = "expected%s.yaml"
-TYPES = [long, int, dict, str, list, tuple, None]
+TYPES = [dict, str, list, tuple, None]
+TYPES.extend(six.integer_types)
 
 
 def _old_mergedict(src, cand):
@@ -25,7 +27,7 @@ def _old_mergedict(src, cand):
     Nested dictionaries are merged recursively.
     """
     if isinstance(src, dict) and isinstance(cand, dict):
-        for (k, v) in cand.iteritems():
+        for (k, v) in cand.items():
             if k not in src:
                 src[k] = v
             else:
@@ -42,8 +44,8 @@ def _old_mergemanydict(*args):
 
 def _random_str(rand):
     base = ''
-    for _i in xrange(rand.randint(1, 2 ** 8)):
-        base += rand.choice(string.letters + string.digits)
+    for _i in range(rand.randint(1, 2 ** 8)):
+        base += rand.choice(string.ascii_letters + string.digits)
     return base
 
 
@@ -64,7 +66,7 @@ def _make_dict(current_depth, max_depth, rand):
     if t in [dict, list, tuple]:
         if t in [dict]:
             amount = rand.randint(0, 5)
-            keys = [_random_str(rand) for _i in xrange(0, amount)]
+            keys = [_random_str(rand) for _i in range(0, amount)]
             base = {}
             for k in keys:
                 try:
@@ -74,14 +76,14 @@ def _make_dict(current_depth, max_depth, rand):
         elif t in [list, tuple]:
             base = []
             amount = rand.randint(0, 5)
-            for _i in xrange(0, amount):
+            for _i in range(0, amount):
                 try:
                     base.append(_make_dict(current_depth + 1, max_depth, rand))
                 except _NoMoreException:
                     pass
             if t in [tuple]:
                 base = tuple(base)
-    elif t in [long, int]:
+    elif t in six.integer_types:
         base = rand.randint(0, 2 ** 8)
     elif t in [str]:
         base = _random_str(rand)
diff --git a/tools/ccfg-merge-debug b/tools/ccfg-merge-debug
index 85227da7..1f08e0cb 100755
--- a/tools/ccfg-merge-debug
+++ b/tools/ccfg-merge-debug
@@ -51,7 +51,7 @@ def main():
     c_handlers.register(ccph)
 
     called = []
-    for (_ctype, mod) in c_handlers.iteritems():
+    for (_ctype, mod) in c_handlers.items():
         if mod in called:
             continue
         handlers.call_begin(mod, data, frequency)
@@ -76,7 +76,7 @@ def main():
 
     # Give callbacks opportunity to finalize
     called = []
-    for (_ctype, mod) in c_handlers.iteritems():
+    for (_ctype, mod) in c_handlers.items():
         if mod in called:
             continue
         handlers.call_end(mod, data, frequency)
-- 
cgit v1.2.3


From 09e81d572d8461d8546f66eacd005bf3c9ae0e39 Mon Sep 17 00:00:00 2001
From: Marco Morais <marco.em.morais@gmail.com>
Date: Thu, 22 Jan 2015 22:25:49 -0800
Subject: Make parameter list for get_hostname method consistent

The sources.DataSource class has a method defined as:
   def get_hostname(self, fqdn=False, resolve_ip=False)

Make the parameter list for this method in DataSourceDigitalOcean
and DataSourceGCE consistent with superclass sources.DataSource.
---
 cloudinit/sources/DataSourceDigitalOcean.py | 2 +-
 cloudinit/sources/DataSourceGCE.py          | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 8f27ee89..fec9db4b 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -84,7 +84,7 @@ class DataSourceDigitalOcean(sources.DataSource):
     def get_instance_id(self):
         return self.metadata['id']
 
-    def get_hostname(self, fqdn=False):
+    def get_hostname(self, fqdn=False, resolve_ip=False):
         return self.metadata['hostname']
 
     def get_package_mirror_info(self):
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 2cf8fdcd..6936c74e 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -126,7 +126,7 @@ class DataSourceGCE(sources.DataSource):
     def get_public_ssh_keys(self):
         return self.metadata['public-keys']
 
-    def get_hostname(self, fqdn=False, _resolve_ip=False):
+    def get_hostname(self, fqdn=False, resolve_ip=False):
         # GCE has long FDQN's and has asked for short hostnames
         return self.metadata['local-hostname'].split('.')[0]
 
-- 
cgit v1.2.3


From 3246c85763d5cdebb3e240fcd5ae29834cbf6299 Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Fri, 23 Jan 2015 15:34:56 -0500
Subject: * Fix the filter() imports. * In Py3, pass universal_newlines to
 subprocess.Popen()

---
 cloudinit/distros/__init__.py | 8 --------
 cloudinit/util.py             | 9 ++++++---
 2 files changed, 6 insertions(+), 11 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 6b96d58c..00fb95fb 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -36,14 +36,6 @@ from cloudinit import util
 
 from cloudinit.distros.parsers import hosts
 
-try:
-    # Python 3
-    from six import filter
-except ImportError:
-    # Python 2
-    from itertools import ifilter as filter
-
-
 
 OSFAMILIES = {
     'debian': ['debian', 'ubuntu'],
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 94fd5c70..25c104c7 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1589,9 +1589,12 @@ def subp(args, data=None, rcs=None, env=None, capture=True, shell=False,
             stdout = subprocess.PIPE
             stderr = subprocess.PIPE
         stdin = subprocess.PIPE
-        sp = subprocess.Popen(args, stdout=stdout,
-                        stderr=stderr, stdin=stdin,
-                        env=env, shell=shell)
+        kws = dict(stdout=stdout, stderr=stderr, stdin=stdin,
+                   env=env, shell=shell)
+        if six.PY3:
+            # Use this so subprocess output will be (Python 3) str, not bytes.
+            kws['universal_newlines'] = True
+        sp = subprocess.Popen(args, **kws)
         (out, err) = sp.communicate(data)
     except OSError as e:
         raise ProcessExecutionError(cmd=args, reason=e)
-- 
cgit v1.2.3


From 841db73600e3f203243c773109d71ab88d3334bc Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Mon, 26 Jan 2015 11:14:06 -0500
Subject: More test repairs.

---
 cloudinit/distros/__init__.py                      |  2 +-
 cloudinit/user_data.py                             |  9 +++++++
 tests/unittests/helpers.py                         | 12 ++++++---
 tests/unittests/test_builtin_handlers.py           |  1 -
 tests/unittests/test_datasource/test_azure.py      | 31 +++++++++++++---------
 tests/unittests/test_datasource/test_gce.py        |  2 +-
 tests/unittests/test_datasource/test_opennebula.py | 10 +++++--
 tests/unittests/test_datasource/test_smartos.py    | 12 ++++++---
 tests/unittests/test_filters/test_launch_index.py  |  8 +++---
 tests/unittests/test_handler/test_handler_chef.py  |  3 ++-
 .../test_handler/test_handler_seed_random.py       | 11 ++++++--
 tests/unittests/test_util.py                       |  6 ++---
 12 files changed, 73 insertions(+), 34 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 00fb95fb..ab874b45 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -857,7 +857,7 @@ def extract_default(users, default_name=None, default_config=None):
     if not tmp_users:
         return (default_name, default_config)
     else:
-        name = tmp_users.keys()[0]
+        name = list(tmp_users)[0]
         config = tmp_users[name]
         config.pop('default', None)
         return (name, config)
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 9111bd39..ff21259c 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -109,6 +109,15 @@ class UserDataProcessor(object):
             ctype = None
             ctype_orig = part.get_content_type()
             payload = part.get_payload(decode=True)
+            # In Python 3, decoding the payload will ironically hand us a
+            # bytes object.  'decode' means to decode according to
+            # Content-Transfer-Encoding, not according to any charset in the
+            # Content-Type.  So, if we end up with bytes, first try to decode
+            # to str via CT charset, and failing that, try utf-8 using
+            # surrogate escapes.
+            if six.PY3 and isinstance(payload, bytes):
+                charset = part.get_charset() or 'utf-8'
+                payload = payload.decode(charset, errors='surrogateescape')
             was_compressed = False
 
             # When the message states it is of a gzipped content type ensure
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 70b8116f..4b8dcc5c 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -1,8 +1,11 @@
 import os
 import sys
+import shutil
 import tempfile
 import unittest
 
+import six
+
 try:
     from unittest import mock
 except ImportError:
@@ -15,8 +18,6 @@ except ImportError:
 from cloudinit import helpers as ch
 from cloudinit import util
 
-import shutil
-
 # Used for detecting different python versions
 PY2 = False
 PY26 = False
@@ -115,7 +116,12 @@ def retarget_many_wrapper(new_base, am, old_func):
             nam = len(n_args)
         for i in range(0, nam):
             path = args[i]
-            n_args[i] = rebase_path(path, new_base)
+            # patchOS() wraps various os and os.path functions, however in
+            # Python 3 some of these now accept file-descriptors (integers).
+            # That breaks rebase_path() so in lieu of a better solution, just
+            # don't rebase if we get a fd.
+            if isinstance(path, six.string_types):
+                n_args[i] = rebase_path(path, new_base)
         return old_func(*n_args, **kwds)
     return wrapper
 
diff --git a/tests/unittests/test_builtin_handlers.py b/tests/unittests/test_builtin_handlers.py
index 47ff6318..ad32d0b2 100644
--- a/tests/unittests/test_builtin_handlers.py
+++ b/tests/unittests/test_builtin_handlers.py
@@ -21,7 +21,6 @@ from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE)
 
 
 class TestBuiltins(test_helpers.FilesystemMockingTestCase):
-
     def test_upstart_frequency_no_out(self):
         c_root = tempfile.mkdtemp()
         self.addCleanup(shutil.rmtree, c_root)
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 2dbcd389..1f0330b3 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -22,6 +22,13 @@ import tempfile
 import unittest
 
 
+def b64(source):
+    # In Python 3, b64encode only accepts bytes and returns bytes.
+    if not isinstance(source, bytes):
+        source = source.encode('utf-8')
+    return base64.b64encode(source).decode('us-ascii')
+
+
 def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
     if data is None:
         data = {'HostName': 'FOOHOST'}
@@ -51,7 +58,7 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
         content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
 
     if userdata:
-        content += "<UserData>%s</UserData>\n" % (base64.b64encode(userdata))
+        content += "<UserData>%s</UserData>\n" % (b64(userdata))
 
     if pubkeys:
         content += "<SSH><PublicKeys>\n"
@@ -181,7 +188,7 @@ class TestAzureDataSource(unittest.TestCase):
         # set dscfg in via base64 encoded yaml
         cfg = {'agent_command': "my_command"}
         odata = {'HostName': "myhost", 'UserName': "myuser",
-                'dscfg': {'text': base64.b64encode(yaml.dump(cfg)),
+                'dscfg': {'text': b64(yaml.dump(cfg)),
                           'encoding': 'base64'}}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
 
@@ -233,13 +240,13 @@ class TestAzureDataSource(unittest.TestCase):
 
     def test_userdata_found(self):
         mydata = "FOOBAR"
-        odata = {'UserData': base64.b64encode(mydata)}
+        odata = {'UserData': b64(mydata)}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
 
         dsrc = self._get_ds(data)
         ret = dsrc.get_data()
         self.assertTrue(ret)
-        self.assertEqual(dsrc.userdata_raw, mydata)
+        self.assertEqual(dsrc.userdata_raw, mydata.encode('utf-8'))
 
     def test_no_datasource_expected(self):
         # no source should be found if no seed_dir and no devs
@@ -281,7 +288,7 @@ class TestAzureDataSource(unittest.TestCase):
                                    'command': 'my-bounce-command',
                                    'hostname_command': 'my-hostname-command'}}
         odata = {'HostName': "xhost",
-                'dscfg': {'text': base64.b64encode(yaml.dump(cfg)),
+                'dscfg': {'text': b64(yaml.dump(cfg)),
                           'encoding': 'base64'}}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
         self._get_ds(data).get_data()
@@ -296,7 +303,7 @@ class TestAzureDataSource(unittest.TestCase):
         # config specifying set_hostname off should not bounce
         cfg = {'set_hostname': False}
         odata = {'HostName': "xhost",
-                'dscfg': {'text': base64.b64encode(yaml.dump(cfg)),
+                'dscfg': {'text': b64(yaml.dump(cfg)),
                           'encoding': 'base64'}}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
         self._get_ds(data).get_data()
@@ -325,7 +332,7 @@ class TestAzureDataSource(unittest.TestCase):
         # Make sure that user can affect disk aliases
         dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}}
         odata = {'HostName': "myhost", 'UserName': "myuser",
-                'dscfg': {'text': base64.b64encode(yaml.dump(dscfg)),
+                'dscfg': {'text': b64(yaml.dump(dscfg)),
                           'encoding': 'base64'}}
         usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'},
                                   'ephemeral0': False}}
@@ -347,7 +354,7 @@ class TestAzureDataSource(unittest.TestCase):
         dsrc = self._get_ds(data)
         dsrc.get_data()
 
-        self.assertEqual(userdata, dsrc.userdata_raw)
+        self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw)
 
     def test_ovf_env_arrives_in_waagent_dir(self):
         xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
@@ -362,7 +369,7 @@ class TestAzureDataSource(unittest.TestCase):
 
     def test_existing_ovf_same(self):
         # waagent/SharedConfig left alone if found ovf-env.xml same as cached
-        odata = {'UserData': base64.b64encode("SOMEUSERDATA")}
+        odata = {'UserData': b64("SOMEUSERDATA")}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
 
         populate_dir(self.waagent_d,
@@ -386,9 +393,9 @@ class TestAzureDataSource(unittest.TestCase):
         # 'get_data' should remove SharedConfig.xml in /var/lib/waagent
         # if ovf-env.xml differs.
         cached_ovfenv = construct_valid_ovf_env(
-            {'userdata': base64.b64encode("FOO_USERDATA")})
+            {'userdata': b64("FOO_USERDATA")})
         new_ovfenv = construct_valid_ovf_env(
-            {'userdata': base64.b64encode("NEW_USERDATA")})
+            {'userdata': b64("NEW_USERDATA")})
 
         populate_dir(self.waagent_d,
             {'ovf-env.xml': cached_ovfenv,
@@ -398,7 +405,7 @@ class TestAzureDataSource(unittest.TestCase):
         dsrc = self._get_ds({'ovfcontent': new_ovfenv})
         ret = dsrc.get_data()
         self.assertTrue(ret)
-        self.assertEqual(dsrc.userdata_raw, "NEW_USERDATA")
+        self.assertEqual(dsrc.userdata_raw, b"NEW_USERDATA")
         self.assertTrue(os.path.exists(
             os.path.join(self.waagent_d, 'otherfile')))
         self.assertFalse(
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index aa60eb33..6dd4b5ed 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -45,7 +45,7 @@ GCE_META_ENCODING = {
     'instance/id': '12345',
     'instance/hostname': 'server.project-baz.local',
     'instance/zone': 'baz/bang',
-    'instance/attributes/user-data': b64encode('/bin/echo baz\n'),
+    'instance/attributes/user-data': b64encode(b'/bin/echo baz\n'),
     'instance/attributes/user-data-encoding': 'base64',
 }
 
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index b79237f0..1a8d2122 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -10,6 +10,12 @@ import shutil
 import tempfile
 import unittest
 
+def b64(source):
+    # In Python 3, b64encode only accepts bytes and returns bytes.
+    if not isinstance(source, bytes):
+        source = source.encode('utf-8')
+    return b64encode(source).decode('us-ascii')
+
 
 TEST_VARS = {
     'VAR1': 'single',
@@ -180,7 +186,7 @@ class TestOpenNebulaDataSource(unittest.TestCase):
             self.assertEqual(USER_DATA, results['userdata'])
 
     def test_user_data_encoding_required_for_decode(self):
-        b64userdata = b64encode(USER_DATA)
+        b64userdata = b64(USER_DATA)
         for k in ('USER_DATA', 'USERDATA'):
             my_d = os.path.join(self.tmp, k)
             populate_context_dir(my_d, {k: b64userdata})
@@ -192,7 +198,7 @@ class TestOpenNebulaDataSource(unittest.TestCase):
     def test_user_data_base64_encoding(self):
         for k in ('USER_DATA', 'USERDATA'):
             my_d = os.path.join(self.tmp, k)
-            populate_context_dir(my_d, {k: b64encode(USER_DATA),
+            populate_context_dir(my_d, {k: b64(USER_DATA),
                                         'USERDATA_ENCODING': 'base64'})
             results = ds.read_context_disk_dir(my_d)
 
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 01b9b73e..2fb9e1b6 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -36,6 +36,12 @@ import tempfile
 import stat
 import uuid
 
+def b64(source):
+    # In Python 3, b64encode only accepts bytes and returns bytes.
+    if not isinstance(source, bytes):
+        source = source.encode('utf-8')
+    return base64.b64encode(source).decode('us-ascii')
+
 
 MOCK_RETURNS = {
     'hostname': 'test-host',
@@ -233,7 +239,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         my_returns = MOCK_RETURNS.copy()
         my_returns['base64_all'] = "true"
         for k in ('hostname', 'cloud-init:user-data'):
-            my_returns[k] = base64.b64encode(my_returns[k])
+            my_returns[k] = b64(my_returns[k])
 
         dsrc = self._get_ds(mockdata=my_returns)
         ret = dsrc.get_data()
@@ -254,7 +260,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         my_returns['b64-cloud-init:user-data'] = "true"
         my_returns['b64-hostname'] = "true"
         for k in ('hostname', 'cloud-init:user-data'):
-            my_returns[k] = base64.b64encode(my_returns[k])
+            my_returns[k] = b64(my_returns[k])
 
         dsrc = self._get_ds(mockdata=my_returns)
         ret = dsrc.get_data()
@@ -270,7 +276,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         my_returns = MOCK_RETURNS.copy()
         my_returns['base64_keys'] = 'hostname,ignored'
         for k in ('hostname',):
-            my_returns[k] = base64.b64encode(my_returns[k])
+            my_returns[k] = b64(my_returns[k])
 
         dsrc = self._get_ds(mockdata=my_returns)
         ret = dsrc.get_data()
diff --git a/tests/unittests/test_filters/test_launch_index.py b/tests/unittests/test_filters/test_launch_index.py
index 2f4c2fda..95d24b9b 100644
--- a/tests/unittests/test_filters/test_launch_index.py
+++ b/tests/unittests/test_filters/test_launch_index.py
@@ -2,7 +2,7 @@ import copy
 
 from .. import helpers
 
-import itertools
+from six.moves import filterfalse
 
 from cloudinit.filters import launch_index
 from cloudinit import user_data as ud
@@ -36,11 +36,9 @@ class TestLaunchFilter(helpers.ResourceUsingTestCase):
             return False
         # Do some basic payload checking
         msg1_msgs = [m for m in msg1.walk()]
-        msg1_msgs = [m for m in
-                     itertools.ifilterfalse(ud.is_skippable, msg1_msgs)]
+        msg1_msgs = [m for m in filterfalse(ud.is_skippable, msg1_msgs)]
         msg2_msgs = [m for m in msg2.walk()]
-        msg2_msgs = [m for m in
-                     itertools.ifilterfalse(ud.is_skippable, msg2_msgs)]
+        msg2_msgs = [m for m in filterfalse(ud.is_skippable, msg2_msgs)]
         for i in range(0, len(msg2_msgs)):
             m1_msg = msg1_msgs[i]
             m2_msg = msg2_msgs[i]
diff --git a/tests/unittests/test_handler/test_handler_chef.py b/tests/unittests/test_handler/test_handler_chef.py
index b06a160c..8ab27911 100644
--- a/tests/unittests/test_handler/test_handler_chef.py
+++ b/tests/unittests/test_handler/test_handler_chef.py
@@ -11,6 +11,7 @@ from cloudinit.sources import DataSourceNone
 
 from .. import helpers as t_help
 
+import six
 import logging
 import shutil
 import tempfile
@@ -77,7 +78,7 @@ class TestChef(t_help.FilesystemMockingTestCase):
         for k, v in cfg['chef'].items():
             self.assertIn(v, c)
         for k, v in cc_chef.CHEF_RB_TPL_DEFAULTS.items():
-            if isinstance(v, basestring):
+            if isinstance(v, six.string_types):
                 self.assertIn(v, c)
         c = util.load_file(cc_chef.CHEF_FB_PATH)
         self.assertEqual({}, json.loads(c))
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
index 579377fb..c2da5ced 100644
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ b/tests/unittests/test_handler/test_handler_seed_random.py
@@ -38,6 +38,13 @@ import logging
 LOG = logging.getLogger(__name__)
 
 
+def b64(source):
+    # In Python 3, b64encode only accepts bytes and returns bytes.
+    if not isinstance(source, bytes):
+        source = source.encode('utf-8')
+    return base64.b64encode(source).decode('us-ascii')
+
+
 class TestRandomSeed(t_help.TestCase):
     def setUp(self):
         super(TestRandomSeed, self).setUp()
@@ -134,7 +141,7 @@ class TestRandomSeed(t_help.TestCase):
         self.assertEquals("big-toe", contents)
 
     def test_append_random_base64(self):
-        data = base64.b64encode('bubbles')
+        data = b64('bubbles')
         cfg = {
             'random_seed': {
                 'file': self._seed_file,
@@ -147,7 +154,7 @@ class TestRandomSeed(t_help.TestCase):
         self.assertEquals("bubbles", contents)
 
     def test_append_random_b64(self):
-        data = base64.b64encode('kit-kat')
+        data = b64('kit-kat')
         cfg = {
             'random_seed': {
                 'file': self._seed_file,
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index b1f5d62c..b0207ace 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -119,7 +119,7 @@ class TestWriteFile(unittest.TestCase):
 
         # Create file first with basic content
         with open(path, "wb") as f:
-            f.write("LINE1\n")
+            f.write(b"LINE1\n")
         util.write_file(path, contents, omode="a")
 
         self.assertTrue(os.path.exists(path))
@@ -194,7 +194,7 @@ class TestDeleteDirContents(unittest.TestCase):
         os.mkdir(os.path.join(self.tmp, "new_dir"))
         f_name = os.path.join(self.tmp, "new_dir", "new_file.txt")
         with open(f_name, "wb") as f:
-            f.write("DELETE ME")
+            f.write(b"DELETE ME")
 
         util.delete_dir_contents(self.tmp)
 
@@ -205,7 +205,7 @@ class TestDeleteDirContents(unittest.TestCase):
         file_name = os.path.join(self.tmp, "new_file.txt")
         link_name = os.path.join(self.tmp, "new_file_link.txt")
         with open(file_name, "wb") as f:
-            f.write("DELETE ME")
+            f.write(b"DELETE ME")
         os.symlink(file_name, link_name)
 
         util.delete_dir_contents(self.tmp)
-- 
cgit v1.2.3


From 0e7e5041a0ef80099c48341952e881009eb65fdf Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Mon, 26 Jan 2015 12:27:51 -0500
Subject: Fix a few string/bytes problems with Python 3.

---
 cloudinit/handlers/__init__.py | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index d67a70ea..cdccf122 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -22,6 +22,7 @@
 
 import abc
 import os
+import six
 
 from cloudinit.settings import (PER_ALWAYS, PER_INSTANCE, FREQUENCIES)
 
@@ -174,11 +175,11 @@ def _extract_first_or_bytes(blob, size):
 
 def _escape_string(text):
     try:
-        return text.encode("string-escape")
-    except TypeError:
+        return text.encode("string_escape")
+    except (LookupError, TypeError):
         try:
-            # Unicode doesn't support string-escape...
-            return text.encode('unicode-escape')
+            # Unicode (and Python 3's str) doesn't support string_escape...
+            return text.encode('unicode_escape')
         except TypeError:
             # Give up...
             pass
@@ -232,7 +233,17 @@ def walk(msg, callback, data):
         headers = dict(part)
         LOG.debug(headers)
         headers['Content-Type'] = ctype
-        callback(data, filename, part.get_payload(decode=True), headers)
+        payload = part.get_payload(decode=True)
+        # In Python 3, decoding the payload will ironically hand us a bytes
+        # object.  'decode' means to decode according to
+        # Content-Transfer-Encoding, not according to any charset in the
+        # Content-Type.  So, if we end up with bytes, first try to decode to
+        # str via CT charset, and failing that, try utf-8 using surrogate
+        # escapes.
+        if six.PY3 and isinstance(payload, bytes):
+            charset = part.get_charset() or 'utf-8'
+            payload = payload.decode(charset, errors='surrogateescape')
+        callback(data, filename, payload, headers)
         partnum = partnum + 1
 
 
-- 
cgit v1.2.3


From e085d5cec212757e0ffffaa1be470e315142a2aa Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Mon, 26 Jan 2015 12:36:13 -0500
Subject: Avoid a nose bug when running under the test suite and no exception
 is in flight.

---
 cloudinit/util.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 25c104c7..d594b611 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1236,8 +1236,15 @@ def logexc(log, msg, *args):
     # coming out to a non-debug stream
     if msg:
         log.warn(msg, *args)
-    # Debug gets the full trace
-    log.debug(msg, exc_info=1, *args)
+    # Debug gets the full trace.  However, nose has a bug whereby its
+    # logcapture plugin doesn't properly handle the case where there is no
+    # actual exception.  To avoid tracebacks during the test suite then, we'll
+    # do the actual exc_info extraction here, and if there is no exception in
+    # flight, we'll just pass in None.
+    exc_info = sys.exc_info()
+    if exc_info == (None, None, None):
+        exc_info = None
+    log.debug(msg, exc_info=exc_info, *args)
 
 
 def hash_blob(blob, routine, mlen=None):
-- 
cgit v1.2.3


From 926a3df79a10ede61967c60f48ff0670a36e689a Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Mon, 26 Jan 2015 12:41:04 -0500
Subject: More Python 3 test fixes.

---
 cloudinit/config/cc_write_files.py                 | 5 +++--
 tests/unittests/test_datasource/test_opennebula.py | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_write_files.py b/cloudinit/config/cc_write_files.py
index a73d6f4e..4b03ea91 100644
--- a/cloudinit/config/cc_write_files.py
+++ b/cloudinit/config/cc_write_files.py
@@ -18,6 +18,7 @@
 
 import base64
 import os
+import six
 
 from cloudinit.settings import PER_INSTANCE
 from cloudinit import util
@@ -25,7 +26,7 @@ from cloudinit import util
 frequency = PER_INSTANCE
 
 DEFAULT_OWNER = "root:root"
-DEFAULT_PERMS = 0644
+DEFAULT_PERMS = 0o644
 UNKNOWN_ENC = 'text/plain'
 
 
@@ -79,7 +80,7 @@ def write_files(name, files, log):
 
 def decode_perms(perm, default, log):
     try:
-        if isinstance(perm, (int, long, float)):
+        if isinstance(perm, six.integer_types + (float,)):
             # Just 'downcast' it (if a float)
             return int(perm)
         else:
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index 1a8d2122..31c6232f 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -33,7 +33,7 @@ TEST_VARS = {
 }
 
 INVALID_CONTEXT = ';'
-USER_DATA = '#cloud-config\napt_upgrade: true'
+USER_DATA = b'#cloud-config\napt_upgrade: true'
 SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i'
 HOSTNAME = 'foo.example.com'
 PUBLIC_IP = '10.0.0.3'
-- 
cgit v1.2.3


From de5974fe93dd717e0c7ba6de17db3192cc258cff Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Mon, 26 Jan 2015 14:31:09 -0500
Subject: * More str/bytes fixes. * Temporarily skip the MAAS tests in py3
 since they need to be ported to oauthlib.

---
 cloudinit/sources/DataSourceOpenNebula.py          | 12 +++++++++---
 cloudinit/sources/DataSourceSmartOS.py             | 15 +++++++++++++--
 tests/unittests/test_datasource/test_maas.py       |  7 ++++++-
 tests/unittests/test_datasource/test_opennebula.py |  4 ++--
 4 files changed, 30 insertions(+), 8 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index f9dac29e..691b39f8 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -379,7 +379,8 @@ def read_context_disk_dir(source_dir, asuser=None):
                 raise BrokenContextDiskDir("configured user '%s' "
                                            "does not exist", asuser)
         try:
-            with open(os.path.join(source_dir, 'context.sh'), 'r') as f:
+            path = os.path.join(source_dir, 'context.sh')
+            with open(path, 'r', encoding='utf-8') as f:
                 content = f.read().strip()
 
             context = parse_shell_config(content, asuser=asuser)
@@ -426,14 +427,19 @@ def read_context_disk_dir(source_dir, asuser=None):
                                context.get('USER_DATA_ENCODING'))
         if encoding == "base64":
             try:
-                results['userdata'] = base64.b64decode(results['userdata'])
+                userdata = base64.b64decode(results['userdata'])
+                # In Python 3 we still expect a str, but b64decode will return
+                # bytes.  Convert to str.
+                if isinstance(userdata, bytes):
+                    userdata = userdata.decode('utf-8')
+                results['userdata'] = userdata
             except TypeError:
                 LOG.warn("Failed base64 decoding of userdata")
 
     # generate static /etc/network/interfaces
     # only if there are any required context variables
     # http://opennebula.org/documentation:rel3.8:cong#network_configuration
-    for k in context.keys():
+    for k in context:
         if re.match(r'^ETH\d+_IP$', k):
             (out, _) = util.subp(['/sbin/ip', 'link'])
             net = OpenNebulaNetwork(out, context)
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 7a975d78..d3ed40c5 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -30,6 +30,7 @@
 #       Comments with "@datadictionary" are snippets of the definition
 
 import base64
+import binascii
 import os
 import serial
 
@@ -350,8 +351,18 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
 
     if b64:
         try:
-            return base64.b64decode(resp)
-        except TypeError:
+            # Generally, we want native strings in the values.  Python 3's
+            # b64decode will return bytes though, so decode them to utf-8 if
+            # possible.  If that fails, return the bytes.
+            decoded = base64.b64decode(resp)
+            try:
+                if isinstance(decoded, bytes):
+                    return decoded.decode('utf-8')
+            except UnicodeDecodeError:
+                pass
+            return decoded
+        # Bogus input produces different errors in Python 2 and 3; catch both.
+        except (TypeError, binascii.Error):
             LOG.warn("Failed base64 decoding key '%s'", noun)
             return resp
 
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index 6af0cd82..66fe22ae 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -4,7 +4,11 @@ import shutil
 import tempfile
 import unittest
 
-from cloudinit.sources import DataSourceMAAS
+# XXX DataSourceMAAS must be ported to oauthlib for Python 3
+import six
+if not six.PY3:
+    from cloudinit.sources import DataSourceMAAS
+
 from cloudinit import url_helper
 from ..helpers import populate_dir
 
@@ -14,6 +18,7 @@ except ImportError:
     import mock
 
 
+@unittest.skipIf(six.PY3, 'DataSourceMAAS must be ported to oauthlib')
 class TestMAASDataSource(unittest.TestCase):
 
     def setUp(self):
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index 31c6232f..ef534bab 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -33,7 +33,7 @@ TEST_VARS = {
 }
 
 INVALID_CONTEXT = ';'
-USER_DATA = b'#cloud-config\napt_upgrade: true'
+USER_DATA = '#cloud-config\napt_upgrade: true'
 SSH_KEY = 'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460-%i'
 HOSTNAME = 'foo.example.com'
 PUBLIC_IP = '10.0.0.3'
@@ -300,7 +300,7 @@ class TestParseShellConfig(unittest.TestCase):
 
 def populate_context_dir(path, variables):
     data = "# Context variables generated by OpenNebula\n"
-    for (k, v) in variables.items():
+    for k, v in variables.items():
         data += ("%s='%s'\n" % (k.upper(), v.replace(r"'", r"'\''")))
     populate_dir(path, {'context.sh': data})
 
-- 
cgit v1.2.3


From 18b35de06432869a9d859e2978e7e9567eba66a2 Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Mon, 26 Jan 2015 14:48:23 -0500
Subject: Another handling of b64decode.

Also, restore Python 2 compatibility.
---
 cloudinit/config/cc_seed_random.py        |  8 +++++++-
 cloudinit/sources/DataSourceOpenNebula.py | 11 ++++++++++-
 2 files changed, 17 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 3b7235bf..981e1b08 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -38,7 +38,13 @@ def _decode(data, encoding=None):
     if not encoding or encoding.lower() in ['raw']:
         return data
     elif encoding.lower() in ['base64', 'b64']:
-        return base64.b64decode(data)
+        # Try to give us a native string in both Python 2 and 3, and remember
+        # that b64decode() returns bytes in Python 3.
+        decoded = base64.b64decode(data)
+        try:
+            return decoded.decode('utf-8')
+        except UnicodeDecodeError:
+            return decoded
     elif encoding.lower() in ['gzip', 'gz']:
         return util.decomp_gzip(data, quiet=False)
     else:
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 691b39f8..6da569ec 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -25,6 +25,7 @@
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import base64
+import codecs
 import os
 import pwd
 import re
@@ -34,6 +35,8 @@ from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import util
 
+import six
+
 LOG = logging.getLogger(__name__)
 
 DEFAULT_IID = "iid-dsopennebula"
@@ -43,6 +46,12 @@ CONTEXT_DISK_FILES = ["context.sh"]
 VALID_DSMODES = ("local", "net", "disabled")
 
 
+def utf8_open(path):
+    if six.PY3:
+        return open(path, 'r', encoding='utf-8')
+    return codecs.open(path, 'r', encoding='utf-8')
+
+
 class DataSourceOpenNebula(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -380,7 +389,7 @@ def read_context_disk_dir(source_dir, asuser=None):
                                            "does not exist", asuser)
         try:
             path = os.path.join(source_dir, 'context.sh')
-            with open(path, 'r', encoding='utf-8') as f:
+            with utf8_open(path) as f:
                 content = f.read().strip()
 
             context = parse_shell_config(content, asuser=asuser)
-- 
cgit v1.2.3


From 2329d28a316e0ea6874b9457a1c04b37895adfd2 Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Mon, 26 Jan 2015 16:18:57 -0500
Subject: Python 3 tests pass, except for skips.

---
 cloudinit/user_data.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index ff21259c..3f860f3b 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -108,6 +108,7 @@ class UserDataProcessor(object):
 
             ctype = None
             ctype_orig = part.get_content_type()
+            ctype_main = part.get_content_maintype()
             payload = part.get_payload(decode=True)
             # In Python 3, decoding the payload will ironically hand us a
             # bytes object.  'decode' means to decode according to
@@ -115,7 +116,7 @@ class UserDataProcessor(object):
             # Content-Type.  So, if we end up with bytes, first try to decode
             # to str via CT charset, and failing that, try utf-8 using
             # surrogate escapes.
-            if six.PY3 and isinstance(payload, bytes):
+            if six.PY3 and ctype_main == 'text' and isinstance(payload, bytes):
                 charset = part.get_charset() or 'utf-8'
                 payload = payload.decode(charset, errors='surrogateescape')
             was_compressed = False
@@ -131,6 +132,7 @@ class UserDataProcessor(object):
                     ctype_orig = None
                     was_compressed = True
                 except util.DecompressionError as e:
+                    # NOTE(review): debugger breakpoint left in committed code;
+                    # neutralized — remove entirely in a follow-up.
                     LOG.warn("Failed decompressing payload from %s of length"
                              " %s due to: %s", ctype_orig, len(payload), e)
                     continue
-- 
cgit v1.2.3


From fabff4aec884467729fc372bb67f240752c15511 Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Mon, 26 Jan 2015 16:37:29 -0500
Subject: Port the MAAS code to oauthlib.

---
 cloudinit/sources/DataSourceMAAS.py          | 56 ++++++++++++++++------------
 requirements.txt                             |  2 +-
 tests/unittests/test_datasource/test_maas.py |  7 +---
 3 files changed, 35 insertions(+), 30 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 8f9c81de..39296f08 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -22,10 +22,11 @@ from __future__ import print_function
 
 from email.utils import parsedate
 import errno
-import oauth.oauth as oauth
+import oauthlib
 import os
 import time
-import urllib2
+
+from six.moves.urllib_request import Request, urlopen
 
 from cloudinit import log as logging
 from cloudinit import sources
@@ -274,25 +275,34 @@ def check_seed_contents(content, seed):
 
 def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
                   timestamp=None):
-    consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
-    token = oauth.OAuthToken(token_key, token_secret)
-
-    if timestamp is None:
-        ts = int(time.time())
-    else:
-        ts = timestamp
-
-    params = {
-        'oauth_version': "1.0",
-        'oauth_nonce': oauth.generate_nonce(),
-        'oauth_timestamp': ts,
-        'oauth_token': token.key,
-        'oauth_consumer_key': consumer.key,
-    }
-    req = oauth.OAuthRequest(http_url=url, parameters=params)
-    req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(),
-                     consumer, token)
-    return req.to_header()
+    client = oauthlib.oauth1.Client(
+        consumer_key,
+        client_secret=consumer_secret,
+        resource_owner_key=token_key,
+        resource_owner_secret=token_secret,
+        signature_method=oauthlib.SIGNATURE_PLAINTEXT)
+    uri, signed_headers, body = client.sign(url)
+    return signed_headers
+
+    ## consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
+    ## token = oauth.OAuthToken(token_key, token_secret)
+
+    ## if timestamp is None:
+    ##     ts = int(time.time())
+    ## else:
+    ##     ts = timestamp
+
+    ## params = {
+    ##     'oauth_version': "1.0",
+    ##     'oauth_nonce': oauth.generate_nonce(),
+    ##     'oauth_timestamp': ts,
+    ##     'oauth_token': token.key,
+    ##     'oauth_consumer_key': consumer.key,
+    ## }
+    ## req = oauth.OAuthRequest(http_url=url, parameters=params)
+    ## req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(),
+    ##                  consumer, token)
+    ## return req.to_header()
 
 
 class MAASSeedDirNone(Exception):
@@ -359,8 +369,8 @@ if __name__ == "__main__":
                     creds[key] = cfg[key]
 
         def geturl(url, headers_cb):
-            req = urllib2.Request(url, data=None, headers=headers_cb(url))
-            return (urllib2.urlopen(req).read())
+            req = Request(url, data=None, headers=headers_cb(url))
+            return urlopen(req).read()
 
         def printurl(url, headers_cb):
             print("== %s ==\n%s\n" % (url, geturl(url, headers_cb)))
diff --git a/requirements.txt b/requirements.txt
index 2a12ca3e..19c88857 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -8,7 +8,7 @@ PrettyTable
 
 # This one is currently only used by the MAAS datasource. If that
 # datasource is removed, this is no longer needed
-oauth
+oauthlib
 
 # This one is currently used only by the CloudSigma and SmartOS datasources.
 # If these datasources are removed, this is no longer needed
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index 66fe22ae..6af0cd82 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -4,11 +4,7 @@ import shutil
 import tempfile
 import unittest
 
-# XXX DataSourceMAAS must be ported to oauthlib for Python 3
-import six
-if not six.PY3:
-    from cloudinit.sources import DataSourceMAAS
-
+from cloudinit.sources import DataSourceMAAS
 from cloudinit import url_helper
 from ..helpers import populate_dir
 
@@ -18,7 +14,6 @@ except ImportError:
     import mock
 
 
-@unittest.skipIf(six.PY3, 'DataSourceMAAS must be ported to oauthlib')
 class TestMAASDataSource(unittest.TestCase):
 
     def setUp(self):
-- 
cgit v1.2.3


From 5e2b8ef0703eb4582a5a8ba50ae7c83a8294d65a Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Mon, 26 Jan 2015 20:02:31 -0500
Subject: Repair the Python 2.6 tests.

---
 cloudinit/util.py                                  | 18 ++++++++--------
 tests/unittests/helpers.py                         | 25 +++++++++++++++++++---
 tests/unittests/test__init__.py                    |  6 ++++--
 tests/unittests/test_cs_util.py                    | 16 +++++++++++++-
 tests/unittests/test_datasource/test_azure.py      |  7 +++---
 tests/unittests/test_datasource/test_cloudsigma.py |  1 +
 .../unittests/test_datasource/test_configdrive.py  |  6 ++++--
 tests/unittests/test_datasource/test_maas.py       |  5 ++---
 tests/unittests/test_datasource/test_nocloud.py    |  7 +++---
 tests/unittests/test_datasource/test_opennebula.py |  4 ++--
 tests/unittests/test_distros/test_netconfig.py     |  4 ++--
 tests/unittests/test_distros/test_resolv.py        |  4 ++--
 tests/unittests/test_distros/test_sysconfig.py     |  4 ++--
 .../test_distros/test_user_data_normalize.py       |  7 +++---
 .../test_handler/test_handler_apt_configure.py     |  3 ++-
 .../test_handler/test_handler_ca_certs.py          |  7 +++---
 .../test_handler/test_handler_growpart.py          |  3 ++-
 tests/unittests/test_pathprefix2dict.py            |  6 +++---
 tests/unittests/test_templating.py                 | 19 +++++++++++++++-
 tests/unittests/test_util.py                       | 15 ++++++-------
 tox.ini                                            | 10 +++++++++
 21 files changed, 122 insertions(+), 55 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 32c19ba2..766f8e32 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2059,23 +2059,23 @@ def _read_dmi_syspath(key):
     Reads dmi data with from /sys/class/dmi/id
     """
 
-    dmi_key = "{}/{}".format(DMI_SYS_PATH, key)
-    LOG.debug("querying dmi data {}".format(dmi_key))
+    dmi_key = "{0}/{1}".format(DMI_SYS_PATH, key)
+    LOG.debug("querying dmi data {0}".format(dmi_key))
     try:
         if not os.path.exists(dmi_key):
-            LOG.debug("did not find {}".format(dmi_key))
+            LOG.debug("did not find {0}".format(dmi_key))
             return None
 
         key_data = load_file(dmi_key)
         if not key_data:
-            LOG.debug("{} did not return any data".format(key))
+            LOG.debug("{0} did not return any data".format(key))
             return None
 
-        LOG.debug("dmi data {} returned {}".format(dmi_key, key_data))
+        LOG.debug("dmi data {0} returned {1}".format(dmi_key, key_data))
         return key_data.strip()
 
     except Exception as e:
-        logexc(LOG, "failed read of {}".format(dmi_key), e)
+        logexc(LOG, "failed read of {0}".format(dmi_key), e)
         return None
 
 
@@ -2087,10 +2087,10 @@ def _call_dmidecode(key, dmidecode_path):
     try:
         cmd = [dmidecode_path, "--string", key]
         (result, _err) = subp(cmd)
-        LOG.debug("dmidecode returned '{}' for '{}'".format(result, key))
+        LOG.debug("dmidecode returned '{0}' for '{1}'".format(result, key))
         return result
     except OSError as _err:
-        LOG.debug('failed dmidecode cmd: {}\n{}'.format(cmd, _err.message))
+        LOG.debug('failed dmidecode cmd: {0}\n{1}'.format(cmd, _err))
         return None
 
 
@@ -2106,7 +2106,7 @@ def read_dmi_data(key):
     if dmidecode_path:
         return _call_dmidecode(key, dmidecode_path)
 
-    LOG.warn("did not find either path {} or dmidecode command".format(
+    LOG.warn("did not find either path {0} or dmidecode command".format(
              DMI_SYS_PATH))
 
     return None
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 828579e8..424d0626 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -39,8 +39,20 @@ else:
         PY3 = True
 
 if PY26:
-    # For now add these on, taken from python 2.7 + slightly adjusted
+    # For now add these on, taken from python 2.7 + slightly adjusted.  Drop
+    # all this once Python 2.6 is dropped as a minimum requirement.
     class TestCase(unittest.TestCase):
+        def setUp(self):
+            unittest.TestCase.setUp(self)
+            self.__all_cleanups = ExitStack()
+
+        def tearDown(self):
+            self.__all_cleanups.close()
+            unittest.TestCase.tearDown(self)
+
+        def addCleanup(self, function, *args, **kws):
+            self.__all_cleanups.callback(function, *args, **kws)
+
         def assertIs(self, expr1, expr2, msg=None):
             if expr1 is not expr2:
                 standardMsg = '%r is not %r' % (expr1, expr2)
@@ -63,6 +75,13 @@ if PY26:
                 standardMsg = standardMsg % (value)
                 self.fail(self._formatMessage(msg, standardMsg))
 
+        def assertIsInstance(self, obj, cls, msg=None):
+            """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
+            default message."""
+            if not isinstance(obj, cls):
+                standardMsg = '%s is not an instance of %r' % (repr(obj), cls)
+                self.fail(self._formatMessage(msg, standardMsg))
+
         def assertDictContainsSubset(self, expected, actual, msg=None):
             missing = []
             mismatched = []
@@ -126,9 +145,9 @@ def retarget_many_wrapper(new_base, am, old_func):
     return wrapper
 
 
-class ResourceUsingTestCase(unittest.TestCase):
+class ResourceUsingTestCase(TestCase):
     def setUp(self):
-        unittest.TestCase.setUp(self)
+        TestCase.setUp(self)
         self.resource_path = None
 
     def resourceLocation(self, subname=None):
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index f5dc3435..1a307e56 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -18,6 +18,8 @@ from cloudinit import settings
 from cloudinit import url_helper
 from cloudinit import util
 
+from .helpers import TestCase
+
 
 class FakeModule(handlers.Handler):
     def __init__(self):
@@ -31,10 +33,10 @@ class FakeModule(handlers.Handler):
         pass
 
 
-class TestWalkerHandleHandler(unittest.TestCase):
+class TestWalkerHandleHandler(TestCase):
 
     def setUp(self):
-        unittest.TestCase.setUp(self)
+        super(TestWalkerHandleHandler, self).setUp()
         tmpdir = tempfile.mkdtemp()
         self.addCleanup(shutil.rmtree, tmpdir)
 
diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py
index 99fac84d..337ac9a0 100644
--- a/tests/unittests/test_cs_util.py
+++ b/tests/unittests/test_cs_util.py
@@ -1,7 +1,21 @@
+from __future__ import print_function
+
+import sys
 import unittest
 
 from cloudinit.cs_utils import Cepko
 
+try:
+    skip = unittest.skip
+except AttributeError:
+    # Python 2.6.  Doesn't have to be high fidelity.
+    def skip(reason):
+        def decorator(func):
+            def wrapper(*args, **kws):
+                print(reason, file=sys.stderr)
+            return wrapper
+        return decorator
+
 
 SERVER_CONTEXT = {
     "cpu": 1000,
@@ -29,7 +43,7 @@ class CepkoMock(Cepko):
 # 2015-01-22 BAW: This test is completely useless because it only ever tests
 # the CepkoMock object.  Even in its original form, I don't think it ever
 # touched the underlying Cepko class methods.
-@unittest.skip('This test is completely useless')
+@skip('This test is completely useless')
 class CepkoResultTests(unittest.TestCase):
     def setUp(self):
         pass
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 1f0330b3..97a53bee 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -1,7 +1,7 @@
 from cloudinit import helpers
 from cloudinit.util import load_file
 from cloudinit.sources import DataSourceAzure
-from ..helpers import populate_dir
+from ..helpers import TestCase, populate_dir
 
 try:
     from unittest import mock
@@ -84,9 +84,10 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
     return content
 
 
-class TestAzureDataSource(unittest.TestCase):
+class TestAzureDataSource(TestCase):
 
     def setUp(self):
+        super(TestAzureDataSource, self).setUp()
         self.tmp = tempfile.mkdtemp()
         self.addCleanup(shutil.rmtree, self.tmp)
 
@@ -416,7 +417,7 @@ class TestAzureDataSource(unittest.TestCase):
             load_file(os.path.join(self.waagent_d, 'ovf-env.xml')))
 
 
-class TestReadAzureOvf(unittest.TestCase):
+class TestReadAzureOvf(TestCase):
     def test_invalid_xml_raises_non_azure_ds(self):
         invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
         self.assertRaises(DataSourceAzure.BrokenAzureDataSource,
diff --git a/tests/unittests/test_datasource/test_cloudsigma.py b/tests/unittests/test_datasource/test_cloudsigma.py
index 306ac7d8..772d189a 100644
--- a/tests/unittests/test_datasource/test_cloudsigma.py
+++ b/tests/unittests/test_datasource/test_cloudsigma.py
@@ -39,6 +39,7 @@ class CepkoMock(Cepko):
 
 class DataSourceCloudSigmaTest(test_helpers.TestCase):
     def setUp(self):
+        super(DataSourceCloudSigmaTest, self).setUp()
         self.datasource = DataSourceCloudSigma.DataSourceCloudSigma("", "", "")
         self.datasource.is_running_in_cloudsigma = lambda: True
         self.datasource.cepko = CepkoMock(SERVER_CONTEXT)
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 258c68e2..fd930877 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -3,7 +3,6 @@ import json
 import os
 import shutil
 import tempfile
-import unittest
 
 try:
     from unittest import mock
@@ -20,6 +19,9 @@ from cloudinit.sources import DataSourceConfigDrive as ds
 from cloudinit.sources.helpers import openstack
 from cloudinit import util
 
+from ..helpers import TestCase
+
+
 PUBKEY = u'ssh-rsa AAAAB3NzaC1....sIkJhq8wdX+4I3A4cYbYP ubuntu@server-460\n'
 EC2_META = {
     'ami-id': 'ami-00000001',
@@ -70,7 +72,7 @@ CFG_DRIVE_FILES_V2 = {
   'openstack/latest/user_data': USER_DATA}
 
 
-class TestConfigDriveDataSource(unittest.TestCase):
+class TestConfigDriveDataSource(TestCase):
 
     def setUp(self):
         super(TestConfigDriveDataSource, self).setUp()
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index 6af0cd82..d25e1adc 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -2,11 +2,10 @@ from copy import copy
 import os
 import shutil
 import tempfile
-import unittest
 
 from cloudinit.sources import DataSourceMAAS
 from cloudinit import url_helper
-from ..helpers import populate_dir
+from ..helpers import TestCase, populate_dir
 
 try:
     from unittest import mock
@@ -14,7 +13,7 @@ except ImportError:
     import mock
 
 
-class TestMAASDataSource(unittest.TestCase):
+class TestMAASDataSource(TestCase):
 
     def setUp(self):
         super(TestMAASDataSource, self).setUp()
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 480a4012..4f967f58 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -1,7 +1,7 @@
 from cloudinit import helpers
 from cloudinit.sources import DataSourceNoCloud
 from cloudinit import util
-from ..helpers import populate_dir
+from ..helpers import TestCase, populate_dir
 
 import os
 import yaml
@@ -19,9 +19,10 @@ except ImportError:
     from contextlib2 import ExitStack
 
 
-class TestNoCloudDataSource(unittest.TestCase):
+class TestNoCloudDataSource(TestCase):
 
     def setUp(self):
+        super(TestNoCloudDataSource, self).setUp()
         self.tmp = tempfile.mkdtemp()
         self.addCleanup(shutil.rmtree, self.tmp)
         self.paths = helpers.Paths({'cloud_dir': self.tmp})
@@ -34,8 +35,6 @@ class TestNoCloudDataSource(unittest.TestCase):
         self.mocks.enter_context(
             mock.patch.object(util, 'get_cmdline', return_value=self.cmdline))
 
-        super(TestNoCloudDataSource, self).setUp()
-
     def test_nocloud_seed_dir(self):
         md = {'instance-id': 'IID', 'dsmode': 'local'}
         ud = "USER_DATA_HERE"
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index ef534bab..e5a4bd18 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -1,7 +1,7 @@
 from cloudinit import helpers
 from cloudinit.sources import DataSourceOpenNebula as ds
 from cloudinit import util
-from ..helpers import populate_dir
+from ..helpers import TestCase, populate_dir
 
 from base64 import b64encode
 import os
@@ -46,7 +46,7 @@ CMD_IP_OUT = '''\
 '''
 
 
-class TestOpenNebulaDataSource(unittest.TestCase):
+class TestOpenNebulaDataSource(TestCase):
     parsed_user = None
 
     def setUp(self):
diff --git a/tests/unittests/test_distros/test_netconfig.py b/tests/unittests/test_distros/test_netconfig.py
index 91e630ae..6d30c5b8 100644
--- a/tests/unittests/test_distros/test_netconfig.py
+++ b/tests/unittests/test_distros/test_netconfig.py
@@ -1,5 +1,4 @@
 import os
-import unittest
 
 try:
     from unittest import mock
@@ -11,6 +10,7 @@ except ImportError:
     from contextlib2 import ExitStack
 
 from six import StringIO
+from ..helpers import TestCase
 
 from cloudinit import distros
 from cloudinit import helpers
@@ -80,7 +80,7 @@ class WriteBuffer(object):
         return self.buffer.getvalue()
 
 
-class TestNetCfgDistro(unittest.TestCase):
+class TestNetCfgDistro(TestCase):
 
     def _get_distro(self, dname):
         cls = distros.fetch(dname)
diff --git a/tests/unittests/test_distros/test_resolv.py b/tests/unittests/test_distros/test_resolv.py
index 779b83e3..faaf5b7f 100644
--- a/tests/unittests/test_distros/test_resolv.py
+++ b/tests/unittests/test_distros/test_resolv.py
@@ -1,7 +1,7 @@
 from cloudinit.distros.parsers import resolv_conf
 
 import re
-import unittest
+from ..helpers import TestCase
 
 
 BASE_RESOLVE = '''
@@ -13,7 +13,7 @@ nameserver 10.15.30.92
 BASE_RESOLVE = BASE_RESOLVE.strip()
 
 
-class TestResolvHelper(unittest.TestCase):
+class TestResolvHelper(TestCase):
     def test_parse_same(self):
         rp = resolv_conf.ResolvConf(BASE_RESOLVE)
         rp_r = str(rp).strip()
diff --git a/tests/unittests/test_distros/test_sysconfig.py b/tests/unittests/test_distros/test_sysconfig.py
index f66201b3..03d89a10 100644
--- a/tests/unittests/test_distros/test_sysconfig.py
+++ b/tests/unittests/test_distros/test_sysconfig.py
@@ -1,13 +1,13 @@
 import re
-import unittest
 
 from cloudinit.distros.parsers.sys_conf import SysConf
+from ..helpers import TestCase
 
 
 # Lots of good examples @
 # http://content.hccfl.edu/pollock/AUnix1/SysconfigFilesDesc.txt
 
-class TestSysConfHelper(unittest.TestCase):
+class TestSysConfHelper(TestCase):
     # This function was added in 2.7, make it work for 2.6
     def assertRegMatches(self, text, regexp):
         regexp = re.compile(regexp)
diff --git a/tests/unittests/test_distros/test_user_data_normalize.py b/tests/unittests/test_distros/test_user_data_normalize.py
index b90d6185..e4488e2a 100644
--- a/tests/unittests/test_distros/test_user_data_normalize.py
+++ b/tests/unittests/test_distros/test_user_data_normalize.py
@@ -1,9 +1,10 @@
-import unittest
-
 from cloudinit import distros
 from cloudinit import helpers
 from cloudinit import settings
 
+from ..helpers import TestCase
+
+
 bcfg = {
    'name': 'bob',
    'plain_text_passwd': 'ubuntu',
@@ -15,7 +16,7 @@ bcfg = {
 }
 
 
-class TestUGNormalize(unittest.TestCase):
+class TestUGNormalize(TestCase):
 
     def _make_distro(self, dtype, def_user=None):
         cfg = dict(settings.CFG_BUILTIN)
diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py
index d72fa8c7..6bccff11 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure.py
@@ -1,6 +1,7 @@
 from cloudinit import util
 
 from cloudinit.config import cc_apt_configure
+from ..helpers import TestCase
 
 import os
 import re
@@ -9,7 +10,7 @@ import tempfile
 import unittest
 
 
-class TestAptProxyConfig(unittest.TestCase):
+class TestAptProxyConfig(TestCase):
     def setUp(self):
         super(TestAptProxyConfig, self).setUp()
         self.tmp = tempfile.mkdtemp()
diff --git a/tests/unittests/test_handler/test_handler_ca_certs.py b/tests/unittests/test_handler/test_handler_ca_certs.py
index 97213a0c..a6b9c0fd 100644
--- a/tests/unittests/test_handler/test_handler_ca_certs.py
+++ b/tests/unittests/test_handler/test_handler_ca_certs.py
@@ -3,6 +3,7 @@ from cloudinit import helpers
 from cloudinit import util
 
 from cloudinit.config import cc_ca_certs
+from ..helpers import TestCase
 
 import logging
 import shutil
@@ -45,7 +46,7 @@ class TestNoConfig(unittest.TestCase):
             self.assertEqual(certs_mock.call_count, 0)
 
 
-class TestConfig(unittest.TestCase):
+class TestConfig(TestCase):
     def setUp(self):
         super(TestConfig, self).setUp()
         self.name = "ca-certs"
@@ -139,7 +140,7 @@ class TestConfig(unittest.TestCase):
         self.assertEqual(self.mock_remove.call_count, 1)
 
 
-class TestAddCaCerts(unittest.TestCase):
+class TestAddCaCerts(TestCase):
 
     def setUp(self):
         super(TestAddCaCerts, self).setUp()
@@ -241,7 +242,7 @@ class TestUpdateCaCerts(unittest.TestCase):
                 ["update-ca-certificates"], capture=False)
 
 
-class TestRemoveDefaultCaCerts(unittest.TestCase):
+class TestRemoveDefaultCaCerts(TestCase):
 
     def setUp(self):
         super(TestRemoveDefaultCaCerts, self).setUp()
diff --git a/tests/unittests/test_handler/test_handler_growpart.py b/tests/unittests/test_handler/test_handler_growpart.py
index 89727863..bef0d80d 100644
--- a/tests/unittests/test_handler/test_handler_growpart.py
+++ b/tests/unittests/test_handler/test_handler_growpart.py
@@ -2,6 +2,7 @@ from cloudinit import cloud
 from cloudinit import util
 
 from cloudinit.config import cc_growpart
+from ..helpers import TestCase
 
 import errno
 import logging
@@ -72,7 +73,7 @@ class TestDisabled(unittest.TestCase):
             self.assertEqual(mockobj.call_count, 0)
 
 
-class TestConfig(unittest.TestCase):
+class TestConfig(TestCase):
     def setUp(self):
         super(TestConfig, self).setUp()
         self.name = "growpart"
diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py
index 38a56dc2..d38260e6 100644
--- a/tests/unittests/test_pathprefix2dict.py
+++ b/tests/unittests/test_pathprefix2dict.py
@@ -1,15 +1,15 @@
 from cloudinit import util
 
-from .helpers import populate_dir
+from .helpers import TestCase, populate_dir
 
 import shutil
 import tempfile
-import unittest
 
 
-class TestPathPrefix2Dict(unittest.TestCase):
+class TestPathPrefix2Dict(TestCase):
 
     def setUp(self):
+        TestCase.setUp(self)
         self.tmp = tempfile.mkdtemp()
         self.addCleanup(shutil.rmtree, self.tmp)
 
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index 957467f6..fbad405f 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -16,6 +16,9 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+from __future__ import print_function
+
+import sys
 import six
 import unittest
 
@@ -24,6 +27,20 @@ import textwrap
 
 from cloudinit import templater
 
+try:
+    skipIf = unittest.skipIf
+except AttributeError:
+    # Python 2.6.  Doesn't have to be high fidelity.
+    def skipIf(condition, reason):
+        def decorator(func):
+            def wrapper(*args, **kws):
+                if condition:
+                    return func(*args, **kws)
+                else:
+                    print(reason, file=sys.stderr)
+            return wrapper
+        return decorator
+
 
 class TestTemplates(test_helpers.TestCase):
     def test_render_basic(self):
@@ -41,7 +58,7 @@ class TestTemplates(test_helpers.TestCase):
         out_data = templater.basic_render(in_data, {'b': 2})
         self.assertEqual(expected_data.strip(), out_data)
 
-    @unittest.skipIf(six.PY3, 'Cheetah is not compatible with Python 3')
+    @skipIf(six.PY3, 'Cheetah is not compatible with Python 3')
     def test_detection(self):
         blob = "## template:cheetah"
 
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 7a224230..a1bd2c46 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -7,7 +7,6 @@ import shutil
 import tempfile
 
 from . import helpers
-import unittest
 import six
 
 try:
@@ -38,7 +37,7 @@ class FakeSelinux(object):
         self.restored.append(path)
 
 
-class TestGetCfgOptionListOrStr(unittest.TestCase):
+class TestGetCfgOptionListOrStr(helpers.TestCase):
     def test_not_found_no_default(self):
         """None is returned if key is not found and no default given."""
         config = {}
@@ -70,7 +69,7 @@ class TestGetCfgOptionListOrStr(unittest.TestCase):
         self.assertEqual([], result)
 
 
-class TestWriteFile(unittest.TestCase):
+class TestWriteFile(helpers.TestCase):
     def setUp(self):
         super(TestWriteFile, self).setUp()
         self.tmp = tempfile.mkdtemp()
@@ -149,7 +148,7 @@ class TestWriteFile(unittest.TestCase):
         mockobj.assert_called_once_with('selinux')
 
 
-class TestDeleteDirContents(unittest.TestCase):
+class TestDeleteDirContents(helpers.TestCase):
     def setUp(self):
         super(TestDeleteDirContents, self).setUp()
         self.tmp = tempfile.mkdtemp()
@@ -215,20 +214,20 @@ class TestDeleteDirContents(unittest.TestCase):
         self.assertDirEmpty(self.tmp)
 
 
-class TestKeyValStrings(unittest.TestCase):
+class TestKeyValStrings(helpers.TestCase):
     def test_keyval_str_to_dict(self):
         expected = {'1': 'one', '2': 'one+one', 'ro': True}
         cmdline = "1=one ro 2=one+one"
         self.assertEqual(expected, util.keyval_str_to_dict(cmdline))
 
 
-class TestGetCmdline(unittest.TestCase):
+class TestGetCmdline(helpers.TestCase):
     def test_cmdline_reads_debug_env(self):
         os.environ['DEBUG_PROC_CMDLINE'] = 'abcd 123'
         self.assertEqual(os.environ['DEBUG_PROC_CMDLINE'], util.get_cmdline())
 
 
-class TestLoadYaml(unittest.TestCase):
+class TestLoadYaml(helpers.TestCase):
     mydefault = "7b03a8ebace993d806255121073fed52"
 
     def test_simple(self):
@@ -335,7 +334,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
         self._patchIn(new_root)
         util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id'))
 
-        dmi_key = "/sys/class/dmi/id/{}".format(key)
+        dmi_key = "/sys/class/dmi/id/{0}".format(key)
         util.write_file(dmi_key, content)
 
     def _no_syspath(self, key, content):
diff --git a/tox.ini b/tox.ini
index e547c693..d04cd47c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -11,3 +11,13 @@ deps =
      nose
      pep8==1.5.7
      pyflakes
+
+[testenv:py26]
+commands = nosetests tests
+deps =
+     contextlib2
+     httpretty>=0.7.1
+     mock
+     nose
+     pep8==1.5.7
+     pyflakes
-- 
cgit v1.2.3


From 1e76dad45e3bce4dac5a638dda970fc02a044dbb Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Tue, 27 Jan 2015 14:24:22 -0500
Subject: Respond to review:

- Remove str() wrappers to second argument to write_files() where it is no
  longer necessary.

Also: Fixed a couple of other octal literals which clearly weren't being
tested.
---
 cloudinit/config/cc_chef.py                | 2 +-
 cloudinit/config/cc_puppet.py              | 2 +-
 cloudinit/config/cc_rightscale_userdata.py | 2 +-
 cloudinit/config/cc_runcmd.py              | 2 +-
 cloudinit/config/cc_salt_minion.py         | 2 +-
 cloudinit/distros/arch.py                  | 2 +-
 cloudinit/distros/gentoo.py                | 2 +-
 cloudinit/distros/rhel_util.py             | 2 +-
 cloudinit/user_data.py                     | 2 +-
 9 files changed, 9 insertions(+), 9 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_chef.py b/cloudinit/config/cc_chef.py
index 584199e5..e18c5405 100644
--- a/cloudinit/config/cc_chef.py
+++ b/cloudinit/config/cc_chef.py
@@ -302,7 +302,7 @@ def install_chef(cloud, chef_cfg, log):
         with util.tempdir() as tmpd:
             # Use tmpdir over tmpfile to avoid 'text file busy' on execute
             tmpf = "%s/chef-omnibus-install" % tmpd
-            util.write_file(tmpf, str(content), mode=0o700)
+            util.write_file(tmpf, content, mode=0o700)
             util.subp([tmpf], capture=False)
     else:
         log.warn("Unknown chef install type '%s'", install_type)
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 6f1b3c57..4501598e 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -91,7 +91,7 @@ def handle(name, cfg, cloud, log, _args):
                 util.chownbyname(PUPPET_SSL_DIR, 'puppet', 'root')
                 util.ensure_dir(PUPPET_SSL_CERT_DIR)
                 util.chownbyname(PUPPET_SSL_CERT_DIR, 'puppet', 'root')
-                util.write_file(PUPPET_SSL_CERT_PATH, str(cfg))
+                util.write_file(PUPPET_SSL_CERT_PATH, cfg)
                 util.chownbyname(PUPPET_SSL_CERT_PATH, 'puppet', 'root')
             else:
                 # Iterate throug the config items, we'll use ConfigParser.set
diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 7d2ec10a..1f769c0a 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -82,7 +82,7 @@ def handle(name, _cfg, cloud, log, _args):
             resp = uhelp.readurl(url)
             # Ensure its a valid http response (and something gotten)
             if resp.ok() and resp.contents:
-                util.write_file(fname, str(resp), mode=0700)
+                util.write_file(fname, resp, mode=0o700)
                 wrote_fns.append(fname)
         except Exception as e:
             captured_excps.append(e)
diff --git a/cloudinit/config/cc_runcmd.py b/cloudinit/config/cc_runcmd.py
index 598c3a3e..66dc3363 100644
--- a/cloudinit/config/cc_runcmd.py
+++ b/cloudinit/config/cc_runcmd.py
@@ -33,6 +33,6 @@ def handle(name, cfg, cloud, log, _args):
     cmd = cfg["runcmd"]
     try:
         content = util.shellify(cmd)
-        util.write_file(out_fn, content, 0700)
+        util.write_file(out_fn, content, 0o700)
     except:
         util.logexc(log, "Failed to shellify %s into file %s", cmd, out_fn)
diff --git a/cloudinit/config/cc_salt_minion.py b/cloudinit/config/cc_salt_minion.py
index 53013dcb..f5786a31 100644
--- a/cloudinit/config/cc_salt_minion.py
+++ b/cloudinit/config/cc_salt_minion.py
@@ -47,7 +47,7 @@ def handle(name, cfg, cloud, log, _args):
     # ... copy the key pair if specified
     if 'public_key' in salt_cfg and 'private_key' in salt_cfg:
         pki_dir = salt_cfg.get('pki_dir', '/etc/salt/pki')
-        with util.umask(077):
+        with util.umask(0o77):
             util.ensure_dir(pki_dir)
             pub_name = os.path.join(pki_dir, 'minion.pub')
             pem_name = os.path.join(pki_dir, 'minion.pem')
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index e540e0bc..45fcf26f 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -129,7 +129,7 @@ class Distro(distros.Distro):
         if not conf:
             conf = HostnameConf('')
         conf.set_hostname(your_hostname)
-        util.write_file(out_fn, str(conf), 0644)
+        util.write_file(out_fn, conf, 0o644)
 
     def _read_system_hostname(self):
         sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 09dd0d73..9e80583c 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -108,7 +108,7 @@ class Distro(distros.Distro):
         if not conf:
             conf = HostnameConf('')
         conf.set_hostname(your_hostname)
-        util.write_file(out_fn, str(conf), 0644)
+        util.write_file(out_fn, conf, 0o644)
 
     def _read_system_hostname(self):
         sys_hostname = self._read_hostname(self.hostname_conf_fn)
diff --git a/cloudinit/distros/rhel_util.py b/cloudinit/distros/rhel_util.py
index 903d7793..84aad623 100644
--- a/cloudinit/distros/rhel_util.py
+++ b/cloudinit/distros/rhel_util.py
@@ -86,4 +86,4 @@ def update_resolve_conf_file(fn, dns_servers, search_servers):
                 r_conf.add_search_domain(s)
             except ValueError:
                 util.logexc(LOG, "Failed at adding search domain %s", s)
-    util.write_file(fn, str(r_conf), 0o644)
+    util.write_file(fn, r_conf, 0o644)
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 3f860f3b..bf5642a5 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -248,7 +248,7 @@ class UserDataProcessor(object):
                 resp = util.read_file_or_url(include_url,
                                              ssl_details=self.ssl_details)
                 if include_once_on and resp.ok():
-                    util.write_file(include_once_fn, str(resp), mode=0o600)
+                    util.write_file(include_once_fn, resp, mode=0o600)
                 if resp.ok():
                     content = str(resp)
                 else:
-- 
cgit v1.2.3


From 542f2f1b83660ec3de7267c0756bc62a05d37683 Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Tue, 27 Jan 2015 14:34:25 -0500
Subject: Remove some unused code.

---
 cloudinit/helpers.py | 3 ---
 1 file changed, 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index ed396b5a..5e99d185 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -321,9 +321,6 @@ class ContentHandlers(object):
     def items(self):
         return list(self.registered.items())
 
-    # XXX This should really go away.
-    iteritems = items
-
 
 class Paths(object):
     def __init__(self, path_cfgs, ds=None):
-- 
cgit v1.2.3


From c3ced2d4bdbbbdcb2466202e1571d4ea7bfc7c72 Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Tue, 27 Jan 2015 14:36:10 -0500
Subject: Remove a comment turd.

---
 cloudinit/sources/DataSourceMAAS.py | 20 --------------------
 1 file changed, 20 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 39296f08..082cc58f 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -284,26 +284,6 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
     uri, signed_headers, body = client.sign(url)
     return signed_headers
 
-    ## consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
-    ## token = oauth.OAuthToken(token_key, token_secret)
-
-    ## if timestamp is None:
-    ##     ts = int(time.time())
-    ## else:
-    ##     ts = timestamp
-
-    ## params = {
-    ##     'oauth_version': "1.0",
-    ##     'oauth_nonce': oauth.generate_nonce(),
-    ##     'oauth_timestamp': ts,
-    ##     'oauth_token': token.key,
-    ##     'oauth_consumer_key': consumer.key,
-    ## }
-    ## req = oauth.OAuthRequest(http_url=url, parameters=params)
-    ## req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(),
-    ##                  consumer, token)
-    ## return req.to_header()
-
 
 class MAASSeedDirNone(Exception):
     pass
-- 
cgit v1.2.3


From 69c64029997599b3f1764ef48fe571094e2ee5f2 Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Tue, 27 Jan 2015 14:40:05 -0500
Subject: Respond to review:

- Just use util.load_file() instead of yet another way to open and read the
  file.
---
 cloudinit/sources/DataSourceOpenNebula.py | 12 +-----------
 1 file changed, 1 insertion(+), 11 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 6da569ec..a0275cda 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -25,7 +25,6 @@
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import base64
-import codecs
 import os
 import pwd
 import re
@@ -35,7 +34,6 @@ from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import util
 
-import six
 
 LOG = logging.getLogger(__name__)
 
@@ -46,12 +44,6 @@ CONTEXT_DISK_FILES = ["context.sh"]
 VALID_DSMODES = ("local", "net", "disabled")
 
 
-def utf8_open(path):
-    if six.PY3:
-        return open(path, 'r', encoding='utf-8')
-    return codecs.open(path, 'r', encoding='utf-8')
-
-
 class DataSourceOpenNebula(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -389,9 +381,7 @@ def read_context_disk_dir(source_dir, asuser=None):
                                            "does not exist", asuser)
         try:
             path = os.path.join(source_dir, 'context.sh')
-            with utf8_open(path) as f:
-                content = f.read().strip()
-
+            content = util.load_file(path)
             context = parse_shell_config(content, asuser=asuser)
         except util.ProcessExecutionError as e:
             raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
-- 
cgit v1.2.3


From 6e742d20e9ed56498925c7c850cd5da65d063b4b Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Tue, 27 Jan 2015 15:03:52 -0500
Subject: Respond to review:

- Refactor both the base64 encoding and decoding into utility functions.

Also:

- Mechanically fix some other broken untested code.
---
 cloudinit/config/cc_seed_random.py                 |  8 +------
 cloudinit/config/cc_ssh_authkey_fingerprints.py    |  2 +-
 cloudinit/sources/DataSourceOpenNebula.py          |  7 +-----
 cloudinit/sources/DataSourceSmartOS.py             | 11 +--------
 cloudinit/util.py                                  | 20 ++++++++++++++++
 tests/unittests/test_datasource/test_azure.py      | 28 ++++++++--------------
 tests/unittests/test_datasource/test_opennebula.py | 11 ++-------
 tests/unittests/test_datasource/test_smartos.py    | 14 ++++-------
 .../test_handler/test_handler_seed_random.py       | 12 ++--------
 9 files changed, 42 insertions(+), 71 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 981e1b08..bb64b0f5 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -38,13 +38,7 @@ def _decode(data, encoding=None):
     if not encoding or encoding.lower() in ['raw']:
         return data
     elif encoding.lower() in ['base64', 'b64']:
-        # Try to give us a native string in both Python 2 and 3, and remember
-        # that b64decode() returns bytes in Python 3.
-        decoded = base64.b64decode(data)
-        try:
-            return decoded.decode('utf-8')
-        except UnicodeDecodeError:
-            return decoded
+        return util.b64d(data)
     elif encoding.lower() in ['gzip', 'gz']:
         return util.decomp_gzip(data, quiet=False)
     else:
diff --git a/cloudinit/config/cc_ssh_authkey_fingerprints.py b/cloudinit/config/cc_ssh_authkey_fingerprints.py
index 51580633..6ce831bc 100644
--- a/cloudinit/config/cc_ssh_authkey_fingerprints.py
+++ b/cloudinit/config/cc_ssh_authkey_fingerprints.py
@@ -32,7 +32,7 @@ from cloudinit import util
 
 def _split_hash(bin_hash):
     split_up = []
-    for i in xrange(0, len(bin_hash), 2):
+    for i in range(0, len(bin_hash), 2):
         split_up.append(bin_hash[i:i + 2])
     return split_up
 
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index a0275cda..61709c1b 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -426,12 +426,7 @@ def read_context_disk_dir(source_dir, asuser=None):
                                context.get('USER_DATA_ENCODING'))
         if encoding == "base64":
             try:
-                userdata = base64.b64decode(results['userdata'])
-                # In Python 3 we still expect a str, but b64decode will return
-                # bytes.  Convert to str.
-                if isinstance(userdata, bytes):
-                    userdata = userdata.decode('utf-8')
-                results['userdata'] = userdata
+                results['userdata'] = util.b64d(results['userdata'])
             except TypeError:
                 LOG.warn("Failed base64 decoding of userdata")
 
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index f59ad3d6..9d48beab 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -351,16 +351,7 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
 
     if b64:
         try:
-            # Generally, we want native strings in the values.  Python 3's
-            # b64decode will return bytes though, so decode them to utf-8 if
-            # possible.  If that fails, return the bytes.
-            decoded = base64.b64decode(resp)
-            try:
-                if isinstance(decoded, bytes):
-                    return decoded.decode('utf-8')
-            except UnicodeDecodeError:
-                pass
-            return decoded
+            return util.b64d(resp)
         # Bogus input produces different errors in Python 2 and 3; catch both.
         except (TypeError, binascii.Error):
             LOG.warn("Failed base64 decoding key '%s'", noun)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 766f8e32..8916cc11 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -44,6 +44,7 @@ import sys
 import tempfile
 import time
 
+from base64 import b64decode, b64encode
 from six.moves.urllib import parse as urlparse
 
 import six
@@ -90,6 +91,25 @@ def encode_text(text, encoding='utf-8'):
         return text
     return text.encode(encoding)
 
+
+def b64d(source):
+    # Base64 decode some data, accepting bytes or unicode/str, and returning
+    # str/unicode if the result is utf-8 compatible, otherwise returning bytes.
+    decoded = b64decode(source)
+    if isinstance(decoded, bytes):
+        try:
+            return decoded.decode('utf-8')
+        except UnicodeDecodeError:
+            return decoded
+
+def b64e(source):
+    # Base64 encode some data, accepting bytes or unicode/str, and returning
+    # str/unicode if the result is utf-8 compatible, otherwise returning bytes.
+    if not isinstance(source, bytes):
+        source = source.encode('utf-8')
+    return b64encode(source).decode('utf-8')
+
+
 # Path for DMI Data
 DMI_SYS_PATH = "/sys/class/dmi/id"
 
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 97a53bee..965bce4b 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -1,5 +1,5 @@
 from cloudinit import helpers
-from cloudinit.util import load_file
+from cloudinit.util import b64e, load_file
 from cloudinit.sources import DataSourceAzure
 from ..helpers import TestCase, populate_dir
 
@@ -12,7 +12,6 @@ try:
 except ImportError:
     from contextlib2 import ExitStack
 
-import base64
 import crypt
 import os
 import stat
@@ -22,13 +21,6 @@ import tempfile
 import unittest
 
 
-def b64(source):
-    # In Python 3, b64encode only accepts bytes and returns bytes.
-    if not isinstance(source, bytes):
-        source = source.encode('utf-8')
-    return base64.b64encode(source).decode('us-ascii')
-
-
 def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
     if data is None:
         data = {'HostName': 'FOOHOST'}
@@ -58,7 +50,7 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
         content += "<%s%s>%s</%s>\n" % (key, attrs, val, key)
 
     if userdata:
-        content += "<UserData>%s</UserData>\n" % (b64(userdata))
+        content += "<UserData>%s</UserData>\n" % (b64e(userdata))
 
     if pubkeys:
         content += "<SSH><PublicKeys>\n"
@@ -189,7 +181,7 @@ class TestAzureDataSource(TestCase):
         # set dscfg in via base64 encoded yaml
         cfg = {'agent_command': "my_command"}
         odata = {'HostName': "myhost", 'UserName': "myuser",
-                'dscfg': {'text': b64(yaml.dump(cfg)),
+                'dscfg': {'text': b64e(yaml.dump(cfg)),
                           'encoding': 'base64'}}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
 
@@ -241,7 +233,7 @@ class TestAzureDataSource(TestCase):
 
     def test_userdata_found(self):
         mydata = "FOOBAR"
-        odata = {'UserData': b64(mydata)}
+        odata = {'UserData': b64e(mydata)}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
 
         dsrc = self._get_ds(data)
@@ -289,7 +281,7 @@ class TestAzureDataSource(TestCase):
                                    'command': 'my-bounce-command',
                                    'hostname_command': 'my-hostname-command'}}
         odata = {'HostName': "xhost",
-                'dscfg': {'text': b64(yaml.dump(cfg)),
+                'dscfg': {'text': b64e(yaml.dump(cfg)),
                           'encoding': 'base64'}}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
         self._get_ds(data).get_data()
@@ -304,7 +296,7 @@ class TestAzureDataSource(TestCase):
         # config specifying set_hostname off should not bounce
         cfg = {'set_hostname': False}
         odata = {'HostName': "xhost",
-                'dscfg': {'text': b64(yaml.dump(cfg)),
+                'dscfg': {'text': b64e(yaml.dump(cfg)),
                           'encoding': 'base64'}}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
         self._get_ds(data).get_data()
@@ -333,7 +325,7 @@ class TestAzureDataSource(TestCase):
         # Make sure that user can affect disk aliases
         dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}}
         odata = {'HostName': "myhost", 'UserName': "myuser",
-                'dscfg': {'text': b64(yaml.dump(dscfg)),
+                'dscfg': {'text': b64e(yaml.dump(dscfg)),
                           'encoding': 'base64'}}
         usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'},
                                   'ephemeral0': False}}
@@ -370,7 +362,7 @@ class TestAzureDataSource(TestCase):
 
     def test_existing_ovf_same(self):
         # waagent/SharedConfig left alone if found ovf-env.xml same as cached
-        odata = {'UserData': b64("SOMEUSERDATA")}
+        odata = {'UserData': b64e("SOMEUSERDATA")}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
 
         populate_dir(self.waagent_d,
@@ -394,9 +386,9 @@ class TestAzureDataSource(TestCase):
         # 'get_data' should remove SharedConfig.xml in /var/lib/waagent
         # if ovf-env.xml differs.
         cached_ovfenv = construct_valid_ovf_env(
-            {'userdata': b64("FOO_USERDATA")})
+            {'userdata': b64e("FOO_USERDATA")})
         new_ovfenv = construct_valid_ovf_env(
-            {'userdata': b64("NEW_USERDATA")})
+            {'userdata': b64e("NEW_USERDATA")})
 
         populate_dir(self.waagent_d,
             {'ovf-env.xml': cached_ovfenv,
diff --git a/tests/unittests/test_datasource/test_opennebula.py b/tests/unittests/test_datasource/test_opennebula.py
index e5a4bd18..27adf21b 100644
--- a/tests/unittests/test_datasource/test_opennebula.py
+++ b/tests/unittests/test_datasource/test_opennebula.py
@@ -3,19 +3,12 @@ from cloudinit.sources import DataSourceOpenNebula as ds
 from cloudinit import util
 from ..helpers import TestCase, populate_dir
 
-from base64 import b64encode
 import os
 import pwd
 import shutil
 import tempfile
 import unittest
 
-def b64(source):
-    # In Python 3, b64encode only accepts bytes and returns bytes.
-    if not isinstance(source, bytes):
-        source = source.encode('utf-8')
-    return b64encode(source).decode('us-ascii')
-
 
 TEST_VARS = {
     'VAR1': 'single',
@@ -186,7 +179,7 @@ class TestOpenNebulaDataSource(TestCase):
             self.assertEqual(USER_DATA, results['userdata'])
 
     def test_user_data_encoding_required_for_decode(self):
-        b64userdata = b64(USER_DATA)
+        b64userdata = util.b64e(USER_DATA)
         for k in ('USER_DATA', 'USERDATA'):
             my_d = os.path.join(self.tmp, k)
             populate_context_dir(my_d, {k: b64userdata})
@@ -198,7 +191,7 @@ class TestOpenNebulaDataSource(TestCase):
     def test_user_data_base64_encoding(self):
         for k in ('USER_DATA', 'USERDATA'):
             my_d = os.path.join(self.tmp, k)
-            populate_context_dir(my_d, {k: b64(USER_DATA),
+            populate_context_dir(my_d, {k: util.b64e(USER_DATA),
                                         'USERDATA_ENCODING': 'base64'})
             results = ds.read_context_disk_dir(my_d)
 
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index b5ebf94d..8b62b1b1 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -24,9 +24,9 @@
 
 from __future__ import print_function
 
-import base64
 from cloudinit import helpers as c_helpers
 from cloudinit.sources import DataSourceSmartOS
+from cloudinit.util import b64e
 from .. import helpers
 import os
 import os.path
@@ -36,12 +36,6 @@ import tempfile
 import stat
 import uuid
 
-def b64(source):
-    # In Python 3, b64encode only accepts bytes and returns bytes.
-    if not isinstance(source, bytes):
-        source = source.encode('utf-8')
-    return base64.b64encode(source).decode('us-ascii')
-
 
 MOCK_RETURNS = {
     'hostname': 'test-host',
@@ -239,7 +233,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         my_returns = MOCK_RETURNS.copy()
         my_returns['base64_all'] = "true"
         for k in ('hostname', 'cloud-init:user-data'):
-            my_returns[k] = b64(my_returns[k])
+            my_returns[k] = b64e(my_returns[k])
 
         dsrc = self._get_ds(mockdata=my_returns)
         ret = dsrc.get_data()
@@ -260,7 +254,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         my_returns['b64-cloud-init:user-data'] = "true"
         my_returns['b64-hostname'] = "true"
         for k in ('hostname', 'cloud-init:user-data'):
-            my_returns[k] = b64(my_returns[k])
+            my_returns[k] = b64e(my_returns[k])
 
         dsrc = self._get_ds(mockdata=my_returns)
         ret = dsrc.get_data()
@@ -276,7 +270,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         my_returns = MOCK_RETURNS.copy()
         my_returns['base64_keys'] = 'hostname,ignored'
         for k in ('hostname',):
-            my_returns[k] = b64(my_returns[k])
+            my_returns[k] = b64e(my_returns[k])
 
         dsrc = self._get_ds(mockdata=my_returns)
         ret = dsrc.get_data()
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
index d3f18fa0..0bcdcb31 100644
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ b/tests/unittests/test_handler/test_handler_seed_random.py
@@ -18,7 +18,6 @@
 
 from cloudinit.config import cc_seed_random
 
-import base64
 import gzip
 import tempfile
 
@@ -38,13 +37,6 @@ import logging
 LOG = logging.getLogger(__name__)
 
 
-def b64(source):
-    # In Python 3, b64encode only accepts bytes and returns bytes.
-    if not isinstance(source, bytes):
-        source = source.encode('utf-8')
-    return base64.b64encode(source).decode('us-ascii')
-
-
 class TestRandomSeed(t_help.TestCase):
     def setUp(self):
         super(TestRandomSeed, self).setUp()
@@ -141,7 +133,7 @@ class TestRandomSeed(t_help.TestCase):
         self.assertEquals("big-toe", contents)
 
     def test_append_random_base64(self):
-        data = b64('bubbles')
+        data = util.b64e('bubbles')
         cfg = {
             'random_seed': {
                 'file': self._seed_file,
@@ -154,7 +146,7 @@ class TestRandomSeed(t_help.TestCase):
         self.assertEquals("bubbles", contents)
 
     def test_append_random_b64(self):
-        data = b64('kit-kat')
+        data = util.b64e('kit-kat')
         cfg = {
             'random_seed': {
                 'file': self._seed_file,
-- 
cgit v1.2.3


From 96d130e7732f1242d71c65a32412ae56cb229abf Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Tue, 27 Jan 2015 15:11:53 -0500
Subject: Respond to review:

- Refactor "fully" decoding the payload of a text/* part.  In Python 3,
  decode=True only means to decode according to Content-Transfer-Encoding, not
  according to any charset in the Content-Type header.  So do that.
---
 cloudinit/handlers/__init__.py | 11 +----------
 cloudinit/user_data.py         | 12 +-----------
 cloudinit/util.py              | 15 +++++++++++++++
 3 files changed, 17 insertions(+), 21 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index cdccf122..6b7abbcd 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -233,16 +233,7 @@ def walk(msg, callback, data):
         headers = dict(part)
         LOG.debug(headers)
         headers['Content-Type'] = ctype
-        payload = part.get_payload(decode=True)
-        # In Python 3, decoding the payload will ironically hand us a bytes
-        # object.  'decode' means to decode according to
-        # Content-Transfer-Encoding, not according to any charset in the
-        # Content-Type.  So, if we end up with bytes, first try to decode to
-        # str via CT charset, and failing that, try utf-8 using surrogate
-        # escapes.
-        if six.PY3 and isinstance(payload, bytes):
-            charset = part.get_charset() or 'utf-8'
-            payload = payload.decode(charset, errors='surrogateescape')
+        payload = util.fully_decoded_payload(part)
         callback(data, filename, payload, headers)
         partnum = partnum + 1
 
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index bf5642a5..5fdc46f2 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -108,17 +108,7 @@ class UserDataProcessor(object):
 
             ctype = None
             ctype_orig = part.get_content_type()
-            ctype_main = part.get_content_maintype()
-            payload = part.get_payload(decode=True)
-            # In Python 3, decoding the payload will ironically hand us a
-            # bytes object.  'decode' means to decode according to
-            # Content-Transfer-Encoding, not according to any charset in the
-            # Content-Type.  So, if we end up with bytes, first try to decode
-            # to str via CT charset, and failing that, try utf-8 using
-            # surrogate escapes.
-            if six.PY3 and ctype_main == 'text' and isinstance(payload, bytes):
-                charset = part.get_charset() or 'utf-8'
-                payload = payload.decode(charset, errors='surrogateescape')
+            payload = util.fully_decoded_payload(part)
             was_compressed = False
 
             # When the message states it is of a gzipped content type ensure
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 8916cc11..3a921afe 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -110,6 +110,21 @@ def b64e(source):
     return b64encode(source).decode('utf-8')
 
 
+def fully_decoded_payload(part):
+    # In Python 3, decoding the payload will ironically hand us a bytes object.
+    # 'decode' means to decode according to Content-Transfer-Encoding, not
+    # according to any charset in the Content-Type.  So, if we end up with
+    # bytes, first try to decode to str via CT charset, and failing that, try
+    # utf-8 using surrogate escapes.
+    cte_payload = part.get_payload(decode=True)
+    if (    six.PY3 and
+            part.get_content_maintype() == 'text' and
+            isinstance(cte_payload, bytes)):
+        charset = part.get_charset() or 'utf-8'
+        return cte_payload.decode(charset, errors='surrogateescape')
+    return cte_payload
+
+
 # Path for DMI Data
 DMI_SYS_PATH = "/sys/class/dmi/id"
 
-- 
cgit v1.2.3


From f24d43d9df2b972720a93120bd60bc8ce86dc1f6 Mon Sep 17 00:00:00 2001
From: Barry Warsaw <barry@python.org>
Date: Tue, 27 Jan 2015 15:13:24 -0500
Subject: Remove debugging turd.

---
 cloudinit/user_data.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 5fdc46f2..fe343d0c 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -122,7 +122,6 @@ class UserDataProcessor(object):
                     ctype_orig = None
                     was_compressed = True
                 except util.DecompressionError as e:
-                    import pdb; pdb.set_trace()
                     LOG.warn("Failed decompressing payload from %s of length"
                              " %s due to: %s", ctype_orig, len(payload), e)
                     continue
-- 
cgit v1.2.3


From c8e75ddf808f4ac017f609bafc101648b7568935 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 10 Feb 2015 17:17:48 +0000
Subject: fix use of 'letters' and translate

---
 cloudinit/config/cc_set_passwords.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 4ca85e21..8b705d90 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -28,11 +28,11 @@ from cloudinit import distros as ds
 from cloudinit import ssh_util
 from cloudinit import util
 
-from string import letters, digits
+from string import ascii_letters, digits
 
 # We are removing certain 'painful' letters/numbers
-PW_SET = (letters.translate(None, 'loLOI') +
-          digits.translate(None, '01'))
+PW_SET = (''.join([x for x in ascii_letters + digits 
+                   if x not in 'loLOI01']))
 
 
 def handle(_name, cfg, cloud, log, args):
-- 
cgit v1.2.3


From 888db3e6bb9076973d2f6a73e0c4f691caa89603 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 10 Feb 2015 17:25:01 +0000
Subject: fix parse_qs usage

---
 cloudinit/config/cc_rightscale_userdata.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 1f769c0a..24880d13 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -41,7 +41,7 @@ from cloudinit.settings import PER_INSTANCE
 from cloudinit import url_helper as uhelp
 from cloudinit import util
 
-from urlparse import parse_qs
+from six.moves.urllib_parse import parse_qs
 
 frequency = PER_INSTANCE
 
-- 
cgit v1.2.3


From b8eb55f9acdf92a58d3c72b0c5e5437c4f0272c1 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 10 Feb 2015 21:33:11 +0000
Subject: use encode_text

---
 bin/cloud-init                 | 2 +-
 cloudinit/config/cc_bootcmd.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/bin/cloud-init b/bin/cloud-init
index d67b2b6d..6c83c2e7 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -428,7 +428,7 @@ def atomic_write_json(path, data):
     try:
         tf = tempfile.NamedTemporaryFile(dir=os.path.dirname(path),
                                          delete=False)
-        tf.write((json.dumps(data, indent=1) + "\n").encode())
+        tf.write(util.encode_text(json.dumps(data, indent=1) + "\n"))
         tf.close()
         os.rename(tf.name, path)
     except Exception as e:
diff --git a/cloudinit/config/cc_bootcmd.py b/cloudinit/config/cc_bootcmd.py
index 3ac22967..a295cc4e 100644
--- a/cloudinit/config/cc_bootcmd.py
+++ b/cloudinit/config/cc_bootcmd.py
@@ -36,7 +36,7 @@ def handle(name, cfg, cloud, log, _args):
     with util.ExtendedTemporaryFile(suffix=".sh") as tmpf:
         try:
             content = util.shellify(cfg["bootcmd"])
-            tmpf.write(content)
+            tmpf.write(util.encode_text(content))
             tmpf.flush()
         except:
             util.logexc(log, "Failed to shellify bootcmd")
-- 
cgit v1.2.3


From f62b86bd45c8df78ada32ab4040a639c9d096202 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 11 Feb 2015 01:09:34 +0000
Subject: fix random_seed module

---
 cloudinit/config/cc_seed_random.py   | 16 ++++++++--------
 cloudinit/sources/DataSourceAzure.py |  3 ++-
 cloudinit/util.py                    | 16 +++++++++-------
 3 files changed, 19 insertions(+), 16 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index bb64b0f5..3288a853 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -22,7 +22,7 @@
 import base64
 import os
 
-from six import StringIO
+from six import BytesIO
 
 from cloudinit.settings import PER_INSTANCE
 from cloudinit import log as logging
@@ -34,13 +34,13 @@ LOG = logging.getLogger(__name__)
 
 def _decode(data, encoding=None):
     if not data:
-        return ''
+        return b''
     if not encoding or encoding.lower() in ['raw']:
-        return data
+        return util.encode_text(data)
     elif encoding.lower() in ['base64', 'b64']:
-        return util.b64d(data)
+        return base64.b64decode(data)
     elif encoding.lower() in ['gzip', 'gz']:
-        return util.decomp_gzip(data, quiet=False)
+        return util.decomp_gzip(data, quiet=False, decode=None)
     else:
         raise IOError("Unknown random_seed encoding: %s" % (encoding))
 
@@ -65,9 +65,9 @@ def handle_random_seed_command(command, required, env=None):
 def handle(name, cfg, cloud, log, _args):
     mycfg = cfg.get('random_seed', {})
     seed_path = mycfg.get('file', '/dev/urandom')
-    seed_data = mycfg.get('data', '')
+    seed_data = mycfg.get('data', b'')
 
-    seed_buf = StringIO()
+    seed_buf = BytesIO()
     if seed_data:
         seed_buf.write(_decode(seed_data, encoding=mycfg.get('encoding')))
 
@@ -75,7 +75,7 @@ def handle(name, cfg, cloud, log, _args):
     # openstack meta_data.json
     metadata = cloud.datasource.metadata
     if metadata and 'random_seed' in metadata:
-        seed_buf.write(metadata['random_seed'])
+        seed_buf.write(util.encode_text(metadata['random_seed']))
 
     seed_data = seed_buf.getvalue()
     if len(seed_data):
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 29ae2c22..c599d50f 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -124,7 +124,8 @@ class DataSourceAzureNet(sources.DataSource):
             LOG.debug("using files cached in %s", ddir)
 
         # azure / hyper-v provides random data here
-        seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True)
+        seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
+                              quiet=True, decode=False)
         if seed:
             self.metadata['random_seed'] = seed
 
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 3a921afe..c998154a 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -96,11 +96,10 @@ def b64d(source):
     # Base64 decode some data, accepting bytes or unicode/str, and returning
     # str/unicode if the result is utf-8 compatible, otherwise returning bytes.
     decoded = b64decode(source)
-    if isinstance(decoded, bytes):
-        try:
-            return decoded.decode('utf-8')
-        except UnicodeDecodeError:
-            return decoded
+    try:
+        return decoded.decode('utf-8')
+    except UnicodeDecodeError:
+        return decoded
 
 def b64e(source):
     # Base64 encode some data, accepting bytes or unicode/str, and returning
@@ -354,11 +353,14 @@ def clean_filename(fn):
     return fn
 
 
-def decomp_gzip(data, quiet=True):
+def decomp_gzip(data, quiet=True, decode=True):
     try:
         buf = six.BytesIO(encode_text(data))
         with contextlib.closing(gzip.GzipFile(None, "rb", 1, buf)) as gh:
-            return decode_binary(gh.read())
+            if decode:
+                return decode_binary(gh.read())
+            else:
+                return gh.read()
     except Exception as e:
         if quiet:
             return data
-- 
cgit v1.2.3


From db66b4a42e24dc83d9316df14485c8413ac94abe Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 10 Feb 2015 20:50:23 -0500
Subject: pep8

---
 cloudinit/util.py               |  3 ++-
 tests/unittests/test_cs_util.py | 16 ++++++++--------
 2 files changed, 10 insertions(+), 9 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index c998154a..b845adfd 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -101,6 +101,7 @@ def b64d(source):
     except UnicodeDecodeError:
         return decoded
 
+
 def b64e(source):
     # Base64 encode some data, accepting bytes or unicode/str, and returning
     # str/unicode if the result is utf-8 compatible, otherwise returning bytes.
@@ -116,7 +117,7 @@ def fully_decoded_payload(part):
     # bytes, first try to decode to str via CT charset, and failing that, try
     # utf-8 using surrogate escapes.
     cte_payload = part.get_payload(decode=True)
-    if (    six.PY3 and
+    if (six.PY3 and
             part.get_content_maintype() == 'text' and
             isinstance(cte_payload, bytes)):
         charset = part.get_charset() or 'utf-8'
diff --git a/tests/unittests/test_cs_util.py b/tests/unittests/test_cs_util.py
index 337ac9a0..d7273035 100644
--- a/tests/unittests/test_cs_util.py
+++ b/tests/unittests/test_cs_util.py
@@ -47,14 +47,14 @@ class CepkoMock(Cepko):
 class CepkoResultTests(unittest.TestCase):
     def setUp(self):
         pass
-        ## self.mocked = self.mocker.replace("cloudinit.cs_utils.Cepko",
-        ##                     spec=CepkoMock,
-        ##                     count=False,
-        ##                     passthrough=False)
-        ## self.mocked()
-        ## self.mocker.result(CepkoMock())
-        ## self.mocker.replay()
-        ## self.c = Cepko()
+        # self.mocked = self.mocker.replace("cloudinit.cs_utils.Cepko",
+        #                     spec=CepkoMock,
+        #                     count=False,
+        #                     passthrough=False)
+        # self.mocked()
+        # self.mocker.result(CepkoMock())
+        # self.mocker.replay()
+        # self.c = Cepko()
 
     def test_getitem(self):
         result = self.c.all()
-- 
cgit v1.2.3


From f67d459da3d81f3b4c4c4171eaf5940dbc73ea25 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 10 Feb 2015 20:50:45 -0500
Subject: pep8

---
 cloudinit/config/cc_set_passwords.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_set_passwords.py b/cloudinit/config/cc_set_passwords.py
index 8b705d90..0c315361 100644
--- a/cloudinit/config/cc_set_passwords.py
+++ b/cloudinit/config/cc_set_passwords.py
@@ -31,7 +31,7 @@ from cloudinit import util
 from string import ascii_letters, digits
 
 # We are removing certain 'painful' letters/numbers
-PW_SET = (''.join([x for x in ascii_letters + digits 
+PW_SET = (''.join([x for x in ascii_letters + digits
                    if x not in 'loLOI01']))
 
 
-- 
cgit v1.2.3


From f38a2047731530dfa796056c6b1a07d2a9158e66 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 11 Feb 2015 10:33:00 +0000
Subject: Fix reference to non-existent variable.

---
 cloudinit/util.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index b845adfd..d63b4bf2 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1481,8 +1481,8 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
                               device, mtype, exc)
                     pass
             if not mountpoint:
-                raise MountFailedError("Failed mounting %s to %s due to: %s" %
-                                       (device, tmpd, exc))
+                raise MountFailedError(
+                    "Failed mounting %s to %s".format(device, tmpd))
 
         # Be nice and ensure it ends with a slash
         if not mountpoint.endswith("/"):
-- 
cgit v1.2.3


From 85953f737b77b55a0fbe160b158f2ce77730e523 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 11 Feb 2015 17:24:08 +0000
Subject: Open /dev/console in text mode (so we don't have to encode strings to
 write them).

---
 cloudinit/util.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index d63b4bf2..fe606f23 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -404,7 +404,7 @@ def multi_log(text, console=True, stderr=True,
     if console:
         conpath = "/dev/console"
         if os.path.exists(conpath):
-            with open(conpath, 'wb') as wfh:
+            with open(conpath, 'w') as wfh:
                 wfh.write(text)
                 wfh.flush()
         else:
-- 
cgit v1.2.3


From ceb229043cec98d79aa8e72c6eb5e79f796a96d7 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 11 Feb 2015 12:57:50 -0500
Subject: provide default final message in jinja to avoid WARN in log

---
 cloudinit/config/cc_final_message.py | 9 ++++++---
 doc/examples/cloud-config.txt        | 4 +++-
 2 files changed, 9 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_final_message.py b/cloudinit/config/cc_final_message.py
index b24294e4..ad957e12 100644
--- a/cloudinit/config/cc_final_message.py
+++ b/cloudinit/config/cc_final_message.py
@@ -26,9 +26,12 @@ from cloudinit.settings import PER_ALWAYS
 
 frequency = PER_ALWAYS
 
-# Cheetah formated default message
-FINAL_MESSAGE_DEF = ("Cloud-init v. ${version} finished at ${timestamp}."
-                     " Datasource ${datasource}.  Up ${uptime} seconds")
+# Jinja formated default message
+FINAL_MESSAGE_DEF = (
+  "## template: jinja\n"
+  "Cloud-init v. {{version}} finished at {{timestamp}}."
+  " Datasource {{datasource}}.  Up {{uptime}} seconds"
+)
 
 
 def handle(_name, cfg, cloud, log, args):
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index ed4eb7fc..1c59c2cf 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -484,7 +484,9 @@ resize_rootfs: True
 # final_message
 # default: cloud-init boot finished at $TIMESTAMP. Up $UPTIME seconds
 # this message is written by cloud-final when the system is finished
-# its first boot
+# its first boot.
+# This message is rendered as if it were a template.  If you
+# want jinja, you have to start the line with '## template:jinja\n'
 final_message: "The system is finally up, after $UPTIME seconds"
 
 # configure where output will go
-- 
cgit v1.2.3


From 9acda162465a9b580a79a0fdc2c8e003151c7ead Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 11 Feb 2015 13:45:55 -0500
Subject: pickle contents: be careful loading and storing pickle to be binary

---
 cloudinit/stages.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index f4f4591d..c5b1ded0 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -180,9 +180,12 @@ class Init(object):
         pickled_fn = self.paths.get_ipath_cur('obj_pkl')
         pickle_contents = None
         try:
-            pickle_contents = util.load_file(pickled_fn)
-        except Exception:
+            pickle_contents = util.load_file(pickled_fn, decode=False)
+        except Exception as e:
+            if os.path.isfile(pickled_fn):
+                LOG.warn("failed loading pickle in %s: %s" % (pickled_fn, e))
             pass
+
         # This is expected so just return nothing
         # successfully loaded...
         if not pickle_contents:
@@ -203,7 +206,7 @@ class Init(object):
             util.logexc(LOG, "Failed pickling datasource %s", self.datasource)
             return False
         try:
-            util.write_file(pickled_fn, pk_contents, mode=0o400)
+            util.write_file(pickled_fn, pk_contents, omode="wb", mode=0o400)
         except Exception:
             util.logexc(LOG, "Failed pickling datasource to %s", pickled_fn)
             return False
-- 
cgit v1.2.3


From 587387cfbff7a89573128dc958df903d1becbde1 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 11 Feb 2015 13:58:23 -0500
Subject: include exception in error again.

it is admittedly not clear, but 'exc' should be defined if
mountpoint is not.
---
 cloudinit/util.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index fe606f23..67ea5553 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1481,8 +1481,8 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
                               device, mtype, exc)
                     pass
             if not mountpoint:
-                raise MountFailedError(
-                    "Failed mounting %s to %s".format(device, tmpd))
+                raise MountFailedError("Failed mounting %s to %s due to: %s" %
+                                       (device, tmpd, exc))
 
         # Be nice and ensure it ends with a slash
         if not mountpoint.endswith("/"):
-- 
cgit v1.2.3


From dcd1590469f7ad1806d1e94abe1badd51edce7ac Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 12 Feb 2015 09:44:28 -0500
Subject: mount_cb: fix scoping of an exception in python3

---
 cloudinit/util.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 67ea5553..4fbdf0a9 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1453,6 +1453,7 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
         if device in mounted:
             mountpoint = mounted[device]['mountpoint']
         else:
+            failure_reason = None
             for mtype in mtypes:
                 mountpoint = None
                 try:
@@ -1479,10 +1480,10 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
                 except (IOError, OSError) as exc:
                     LOG.debug("Failed mount of '%s' as '%s': %s",
                               device, mtype, exc)
-                    pass
+                    failure_reason = exc
             if not mountpoint:
                 raise MountFailedError("Failed mounting %s to %s due to: %s" %
-                                       (device, tmpd, exc))
+                                       (device, tmpd, failure_reason))
 
         # Be nice and ensure it ends with a slash
         if not mountpoint.endswith("/"):
-- 
cgit v1.2.3


From 9c224b8bbe5e133fca00d04d070337ffed23bbd9 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 13 Feb 2015 16:04:03 -0500
Subject: fix usage of python2 'print'

---
 cloudinit/config/cc_disk_setup.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index d8553167..f899210b 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -618,7 +618,7 @@ def exec_mkpart_gpt(device, layout):
                     [SGDISK_CMD,
                      '-t', '{}:{}'.format(index, partition_type), device])
     except Exception:
-        print "Failed to partition device %s" % (device,)
+        LOG.warn("Failed to partition device %s" % device)
         raise
 
 
-- 
cgit v1.2.3


From 10aeda45b32645542d03cd42bd830558a6354495 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 17 Feb 2015 16:33:23 +0000
Subject: Clean up imports in DataSourceCloudStack.py.

---
 cloudinit/sources/DataSourceCloudStack.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 1bbeca59..b8974dc1 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -26,14 +26,13 @@
 
 import os
 import time
+from socket import inet_ntoa
+from struct import pack
 
 from cloudinit import ec2_utils as ec2
 from cloudinit import log as logging
-from cloudinit import sources
 from cloudinit import url_helper as uhelp
-from cloudinit import util
-from socket import inet_ntoa
-from struct import pack
+from cloudinit import sources, util
 
 LOG = logging.getLogger(__name__)
 
-- 
cgit v1.2.3


From e626359a6ea47880f0c17add03502513ee3a6792 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 17 Feb 2015 16:33:23 +0000
Subject: Fetch and use passwords from CloudStack virtual router.

---
 cloudinit/sources/DataSourceCloudStack.py | 36 ++++++++++++++++++++++++++++---
 1 file changed, 33 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index b8974dc1..0377d940 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -29,6 +29,8 @@ import time
 from socket import inet_ntoa
 from struct import pack
 
+from six.moves import http_client
+
 from cloudinit import ec2_utils as ec2
 from cloudinit import log as logging
 from cloudinit import url_helper as uhelp
@@ -44,10 +46,11 @@ class DataSourceCloudStack(sources.DataSource):
         # Cloudstack has its metadata/userdata URLs located at
         # http://<virtual-router-ip>/latest/
         self.api_ver = 'latest'
-        vr_addr = get_vr_address()
-        if not vr_addr:
+        self.vr_addr = get_vr_address()
+        if not self.vr_addr:
             raise RuntimeError("No virtual router found!")
-        self.metadata_address = "http://%s/" % (vr_addr)
+        self.metadata_address = "http://%s/" % (self.vr_addr,)
+        self.cfg = {}
 
     def _get_url_settings(self):
         mcfg = self.ds_cfg
@@ -92,6 +95,9 @@ class DataSourceCloudStack(sources.DataSource):
 
         return bool(url)
 
+    def get_config_obj(self):
+        return self.cfg
+
     def get_data(self):
         seed_ret = {}
         if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
@@ -109,12 +115,36 @@ class DataSourceCloudStack(sources.DataSource):
                                                       self.metadata_address)
             LOG.debug("Crawl of metadata service took %s seconds",
                       int(time.time() - start_time))
+            set_password = self.get_password()
+            if set_password:
+                self.cfg = {
+                    'ssh_pwauth': True,
+                    'password': set_password,
+                    'chpasswd': {
+                        'expire': False,
+                    },
+                }
             return True
         except Exception:
             util.logexc(LOG, 'Failed fetching from metadata service %s',
                         self.metadata_address)
             return False
 
+    def get_password(self):
+        def _do_request(req_string):
+            conn = http_client.HTTPConnection(self.vr_addr, 8080)
+            conn.request('GET', '', headers={'DomU_Request': req_string})
+            output = conn.sock.recv(1024).decode('utf-8').strip()
+            conn.close()
+            return output
+        password = _do_request('send_my_password')
+        if password in ['', 'saved_password']:
+            return None
+        if password == 'bad_request':
+            raise RuntimeError('Error when attempting to fetch root password.')
+        _do_request('saved_password')
+        return password
+
     def get_instance_id(self):
         return self.metadata['instance-id']
 
-- 
cgit v1.2.3


From e01795dac74cd31bd6054e3185c2dba6203690ca Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 17 Feb 2015 16:33:23 +0000
Subject: Add explanatory comment.

---
 cloudinit/sources/DataSourceCloudStack.py | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 0377d940..5eda10a5 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -132,6 +132,9 @@ class DataSourceCloudStack(sources.DataSource):
 
     def get_password(self):
         def _do_request(req_string):
+            # We have to provide a valid HTTP request, but a valid HTTP
+            # response is not returned. This means that getresponse() chokes,
+            # so we use the socket directly to read off the password.
             conn = http_client.HTTPConnection(self.vr_addr, 8080)
             conn.request('GET', '', headers={'DomU_Request': req_string})
             output = conn.sock.recv(1024).decode('utf-8').strip()
-- 
cgit v1.2.3


From 589ced475c9e200d4645f0b06f7846dae412b194 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 18 Feb 2015 13:30:51 +0000
Subject: Read ovf-env.xml as bytes.

This should fix the Azure data source on Python 3, and is appropriate as
XML shouldn't really be read as a string.
---
 cloudinit/sources/DataSourceAzure.py          | 4 ++--
 tests/unittests/helpers.py                    | 5 +++--
 tests/unittests/test_datasource/test_azure.py | 6 ++++++
 3 files changed, 11 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 444070bb..6e030217 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -137,7 +137,7 @@ class DataSourceAzureNet(sources.DataSource):
 
         if found != ddir:
             cached_ovfenv = util.load_file(
-                os.path.join(ddir, 'ovf-env.xml'), quiet=True)
+                os.path.join(ddir, 'ovf-env.xml'), quiet=True, decode=False)
             if cached_ovfenv != files['ovf-env.xml']:
                 # source was not walinux-agent's datadir, so we have to clean
                 # up so 'wait_for_files' doesn't return early due to stale data
@@ -593,7 +593,7 @@ def load_azure_ds_dir(source_dir):
     if not os.path.isfile(ovf_file):
         raise NonAzureDataSource("No ovf-env file found")
 
-    with open(ovf_file, "r") as fp:
+    with open(ovf_file, "rb") as fp:
         contents = fp.read()
 
     md, ud, cfg = read_azure_ovf(contents)
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index ce77af93..7516bd02 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -287,10 +287,11 @@ def populate_dir(path, files):
     if not os.path.exists(path):
         os.makedirs(path)
     for (name, content) in files.items():
-        with open(os.path.join(path, name), "w") as fp:
-            fp.write(content)
+        with open(os.path.join(path, name), "wb") as fp:
+            fp.write(content.encode('utf-8'))
             fp.close()
 
+
 try:
     skipIf = unittest.skipIf
 except AttributeError:
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 965bce4b..38d70fcd 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -360,6 +360,12 @@ class TestAzureDataSource(TestCase):
         self.assertTrue(os.path.exists(ovf_env_path))
         self.assertEqual(xml, load_file(ovf_env_path))
 
+    def test_ovf_can_include_unicode(self):
+        xml = construct_valid_ovf_env(data={})
+        xml = u'\ufeff{0}'.format(xml)
+        dsrc = self._get_ds({'ovfcontent': xml})
+        dsrc.get_data()
+
     def test_existing_ovf_same(self):
         # waagent/SharedConfig left alone if found ovf-env.xml same as cached
         odata = {'UserData': b64e("SOMEUSERDATA")}
-- 
cgit v1.2.3


From 5e864eb373ead67d2bc29a19d970f9d3d94c53df Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 18 Feb 2015 18:09:34 +0000
Subject: Failing to fetch a CloudStack password should never fail the whole
 DS.

There might be some CloudStack deployments without the :8080 password
server, and there's no reason the rest of the data source can't be used
for them.
---
 cloudinit/sources/DataSourceCloudStack.py | 24 +++++++++++++++---------
 1 file changed, 15 insertions(+), 9 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 5eda10a5..a8f8daec 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -115,15 +115,21 @@ class DataSourceCloudStack(sources.DataSource):
                                                       self.metadata_address)
             LOG.debug("Crawl of metadata service took %s seconds",
                       int(time.time() - start_time))
-            set_password = self.get_password()
-            if set_password:
-                self.cfg = {
-                    'ssh_pwauth': True,
-                    'password': set_password,
-                    'chpasswd': {
-                        'expire': False,
-                    },
-                }
+            try:
+                set_password = self.get_password()
+            except Exception:
+                util.logexc(LOG,
+                            'Failed to fetch password from virtual router %s',
+                            self.vr_addr)
+            else:
+                if set_password:
+                    self.cfg = {
+                        'ssh_pwauth': True,
+                        'password': set_password,
+                        'chpasswd': {
+                            'expire': False,
+                        },
+                    }
             return True
         except Exception:
             util.logexc(LOG, 'Failed fetching from metadata service %s',
-- 
cgit v1.2.3


From d3d44a3efaf22c91d342f2cb81470745b7be0658 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 18 Feb 2015 18:10:15 +0000
Subject: Set an explicit timeout when fetching CloudStack passwords.

---
 cloudinit/sources/DataSourceCloudStack.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index a8f8daec..89f58e1e 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -143,6 +143,7 @@ class DataSourceCloudStack(sources.DataSource):
             # so we use the socket directly to read off the password.
             conn = http_client.HTTPConnection(self.vr_addr, 8080)
             conn.request('GET', '', headers={'DomU_Request': req_string})
+            conn.sock.settimeout(30)
             output = conn.sock.recv(1024).decode('utf-8').strip()
             conn.close()
             return output
-- 
cgit v1.2.3


From b57c6a109491f344fa6e6fc2593ab2e60ca65249 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 20 Feb 2015 10:57:06 +0000
Subject: Minor formatting clean-up in CloudStack DS.

---
 cloudinit/sources/DataSourceCloudStack.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 89f58e1e..85f20c23 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -84,14 +84,14 @@ class DataSourceCloudStack(sources.DataSource):
                                   'latest/meta-data/instance-id')]
         start_time = time.time()
         url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
-                                timeout=timeout, status_cb=LOG.warn)
+                                 timeout=timeout, status_cb=LOG.warn)
 
         if url:
             LOG.debug("Using metadata source: '%s'", url)
         else:
             LOG.critical(("Giving up on waiting for the metadata from %s"
                           " after %s seconds"),
-                          urls, int(time.time() - start_time))
+                         urls, int(time.time() - start_time))
 
         return bool(url)
 
@@ -109,8 +109,8 @@ class DataSourceCloudStack(sources.DataSource):
             if not self.wait_for_metadata_service():
                 return False
             start_time = time.time()
-            self.userdata_raw = ec2.get_instance_userdata(self.api_ver,
-                self.metadata_address)
+            self.userdata_raw = ec2.get_instance_userdata(
+                self.api_ver, self.metadata_address)
             self.metadata = ec2.get_instance_metadata(self.api_ver,
                                                       self.metadata_address)
             LOG.debug("Crawl of metadata service took %s seconds",
@@ -231,7 +231,7 @@ def get_vr_address():
 
 # Used to match classes to dependencies
 datasources = [
-  (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+    (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
 ]
 
 
-- 
cgit v1.2.3


From f8d9ebbe3743bcada75bc1a980b49f493e2da2f1 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 20 Feb 2015 10:57:18 +0000
Subject: Split CloudStack password handling out to separate class.

---
 cloudinit/sources/DataSourceCloudStack.py | 65 +++++++++++++++++++++----------
 1 file changed, 45 insertions(+), 20 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 85f20c23..0c3c51c0 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -39,6 +39,49 @@ from cloudinit import sources, util
 LOG = logging.getLogger(__name__)
 
 
+class CloudStackPasswordServerClient(object):
+    """
+    Implements password fetching from the CloudStack password server.
+
+    http://cloudstack-administration.readthedocs.org/en/latest/templates.html#adding-password-management-to-your-templates
+    has documentation about the system.  This implementation is following that
+    found at
+    https://github.com/shankerbalan/cloudstack-scripts/blob/master/cloud-set-guest-password-debian
+
+    The CloudStack password server is, essentially, a broken HTTP
+    server. It requires us to provide a valid HTTP request (including a
+    DomU_Request header, which is the meat of the request), but just
+    writes the text of its response on to the socket, without a status
+    line or any HTTP headers.  This makes HTTP libraries sad, which
+    explains the screwiness of the implementation of this class.
+    """
+
+    def __init__(self, virtual_router_address):
+        self.virtual_router_address = virtual_router_address
+
+    def _do_request(self, domu_request):
+        # We have to provide a valid HTTP request, but a valid HTTP
+        # response is not returned. This means that getresponse() chokes,
+        # so we use the socket directly to read off the response.
+        # Because we're reading off the socket directly, we can't re-use the
+        # connection.
+        conn = http_client.HTTPConnection(self.virtual_router_address, 8080)
+        conn.request('GET', '', headers={'DomU_Request': domu_request})
+        conn.sock.settimeout(30)
+        output = conn.sock.recv(1024).decode('utf-8').strip()
+        conn.close()
+        return output
+
+    def get_password(self):
+        password = self._do_request('send_my_password')
+        if password in ['', 'saved_password']:
+            return None
+        if password == 'bad_request':
+            raise RuntimeError('Error when attempting to fetch root password.')
+        self._do_request('saved_password')
+        return password
+
+
 class DataSourceCloudStack(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -115,8 +158,9 @@ class DataSourceCloudStack(sources.DataSource):
                                                       self.metadata_address)
             LOG.debug("Crawl of metadata service took %s seconds",
                       int(time.time() - start_time))
+            password_client = CloudStackPasswordServerClient(self.vr_addr)
             try:
-                set_password = self.get_password()
+                set_password = password_client.get_password()
             except Exception:
                 util.logexc(LOG,
                             'Failed to fetch password from virtual router %s',
@@ -136,25 +180,6 @@ class DataSourceCloudStack(sources.DataSource):
                         self.metadata_address)
             return False
 
-    def get_password(self):
-        def _do_request(req_string):
-            # We have to provide a valid HTTP request, but a valid HTTP
-            # response is not returned. This means that getresponse() chokes,
-            # so we use the socket directly to read off the password.
-            conn = http_client.HTTPConnection(self.vr_addr, 8080)
-            conn.request('GET', '', headers={'DomU_Request': req_string})
-            conn.sock.settimeout(30)
-            output = conn.sock.recv(1024).decode('utf-8').strip()
-            conn.close()
-            return output
-        password = _do_request('send_my_password')
-        if password in ['', 'saved_password']:
-            return None
-        if password == 'bad_request':
-            raise RuntimeError('Error when attempting to fetch root password.')
-        _do_request('saved_password')
-        return password
-
     def get_instance_id(self):
         return self.metadata['instance-id']
 
-- 
cgit v1.2.3


From 43a8d82141c5abcdf5ca546fd5a8ebc95cb3cbaf Mon Sep 17 00:00:00 2001
From: Ben Howard <ben.howard@canonical.com>
Date: Fri, 20 Feb 2015 15:19:07 -0700
Subject: Fix for Py2 to Py3 difference: cloud-init user-data mime conversion
 fails on base64 encoded data.

---
 cloudinit/user_data.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index fe343d0c..8fd7fba5 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -337,7 +337,7 @@ def convert_string(raw_data, headers=None):
     if not headers:
         headers = {}
     data = util.decomp_gzip(raw_data)
-    if "mime-version:" in data[0:4096].lower():
+    if "mime-version:" in str(data[0:4096]).lower():
         msg = email.message_from_string(data)
         for (key, val) in headers.items():
             _replace_header(msg, key, val)
-- 
cgit v1.2.3


From ef84bd214a1d5e0b922c0dd38096f694f8ff406e Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Mon, 23 Feb 2015 09:22:50 +0000
Subject: Always close the password server connection, even on failure.

---
 cloudinit/sources/DataSourceCloudStack.py | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 0c3c51c0..996076b1 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -66,10 +66,12 @@ class CloudStackPasswordServerClient(object):
         # Because we're reading off the socket directly, we can't re-use the
         # connection.
         conn = http_client.HTTPConnection(self.virtual_router_address, 8080)
-        conn.request('GET', '', headers={'DomU_Request': domu_request})
-        conn.sock.settimeout(30)
-        output = conn.sock.recv(1024).decode('utf-8').strip()
-        conn.close()
+        try:
+            conn.request('GET', '', headers={'DomU_Request': domu_request})
+            conn.sock.settimeout(30)
+            output = conn.sock.recv(1024).decode('utf-8').strip()
+        finally:
+            conn.close()
         return output
 
     def get_password(self):
-- 
cgit v1.2.3


From 9ab6bbab42ffb5cadbe0afb36aa6967ed94459c3 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Mon, 23 Feb 2015 09:36:36 +0000
Subject: Add documentation about upstream CloudStack HTTP fix.

---
 cloudinit/sources/DataSourceCloudStack.py | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 996076b1..7b32e1fa 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -54,6 +54,9 @@ class CloudStackPasswordServerClient(object):
     writes the text of its response on to the socket, without a status
     line or any HTTP headers.  This makes HTTP libraries sad, which
     explains the screwiness of the implementation of this class.
+
+    This should be fixed in CloudStack by commit
+    a72f14ea9cb832faaac946b3cf9f56856b50142a in December 2014.
     """
 
     def __init__(self, virtual_router_address):
-- 
cgit v1.2.3


From f1ee9275a504c20153b795923b1f51d3005d745c Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 24 Feb 2015 11:58:22 -0500
Subject: use util.decode_binary rather than str, add tests.

just seems to make more sense to decode here.

Add a test showing the previous failure (testBytesInPayload)
And one that should pass (testStringInPayload)

Also, add a test for unencoded content in the ovf xml (test_userdata_plain)
And explicitly set encoding on another test (test_userdata_found).
---
 cloudinit/user_data.py                        |  4 ++--
 tests/unittests/test_datasource/test_azure.py | 14 +++++++++++--
 tests/unittests/test_udprocess.py             | 30 +++++++++++++++++++++++++++
 3 files changed, 44 insertions(+), 4 deletions(-)
 create mode 100644 tests/unittests/test_udprocess.py

(limited to 'cloudinit')

diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 8fd7fba5..b11894ce 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -336,8 +336,8 @@ def convert_string(raw_data, headers=None):
         raw_data = ''
     if not headers:
         headers = {}
-    data = util.decomp_gzip(raw_data)
-    if "mime-version:" in str(data[0:4096]).lower():
+    data = util.decode_binary(util.decomp_gzip(raw_data))
+    if "mime-version:" in data[0:4096].lower():
         msg = email.message_from_string(data)
         for (key, val) in headers.items():
             _replace_header(msg, key, val)
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 38d70fcd..8112c69b 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -1,5 +1,5 @@
 from cloudinit import helpers
-from cloudinit.util import b64e, load_file
+from cloudinit.util import b64e, decode_binary, load_file
 from cloudinit.sources import DataSourceAzure
 from ..helpers import TestCase, populate_dir
 
@@ -231,9 +231,19 @@ class TestAzureDataSource(TestCase):
         self.assertEqual(defuser['passwd'],
             crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos]))
 
+    def test_userdata_plain(self):
+        mydata = "FOOBAR"
+        odata = {'UserData': {'text': mydata, 'encoding': 'plain'}}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEqual(decode_binary(dsrc.userdata_raw), mydata)
+
     def test_userdata_found(self):
         mydata = "FOOBAR"
-        odata = {'UserData': b64e(mydata)}
+        odata = {'UserData': {'text': b64e(mydata), 'encoding': 'base64'}}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
 
         dsrc = self._get_ds(data)
diff --git a/tests/unittests/test_udprocess.py b/tests/unittests/test_udprocess.py
new file mode 100644
index 00000000..39adbf9d
--- /dev/null
+++ b/tests/unittests/test_udprocess.py
@@ -0,0 +1,30 @@
+from . import helpers
+
+from six.moves import filterfalse
+
+from cloudinit import user_data as ud
+from cloudinit import util
+
+def count_messages(root):
+    am = 0
+    for m in root.walk():
+        if ud.is_skippable(m):
+            continue
+        am += 1
+    return am
+
+
+class TestUDProcess(helpers.ResourceUsingTestCase):
+
+    def testBytesInPayload(self):
+        msg = b'#cloud-config\napt_update: True\n'
+        ud_proc = ud.UserDataProcessor(self.getCloudPaths())
+        message = ud_proc.process(msg)
+        self.assertTrue(count_messages(message) == 1)
+
+    def testStringInPayload(self):
+        msg = '#cloud-config\napt_update: True\n'
+
+        ud_proc = ud.UserDataProcessor(self.getCloudPaths())
+        message = ud_proc.process(msg)
+        self.assertTrue(count_messages(message) == 1)
-- 
cgit v1.2.3


From e2fea567772f3d178072607aee617c3792185db0 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 24 Feb 2015 16:19:02 -0500
Subject: further fixing of non-text user-data.

---
 cloudinit/stages.py | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index c5b1ded0..94fcf4cc 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -327,16 +327,26 @@ class Init(object):
         self._store_vendordata()
 
     def _store_userdata(self):
-        raw_ud = "%s" % (self.datasource.get_userdata_raw())
+        raw_ud = self.datasource.get_userdata_raw()
+        if raw_ud is None:
+            raw_ud = b''
         util.write_file(self._get_ipath('userdata_raw'), raw_ud, 0o600)
-        processed_ud = "%s" % (self.datasource.get_userdata())
-        util.write_file(self._get_ipath('userdata'), processed_ud, 0o600)
+        # processed userdata is a Mime message, so write it as string.
+        processed_ud = self.datasource.get_userdata()
+        if processed_ud is None:
+            raw_ud = ''
+        util.write_file(self._get_ipath('userdata'), str(processed_ud), 0o600)
 
     def _store_vendordata(self):
-        raw_vd = "%s" % (self.datasource.get_vendordata_raw())
+        raw_vd = self.datasource.get_vendordata_raw()
+        if raw_vd is None:
+            raw_vd = b''
         util.write_file(self._get_ipath('vendordata_raw'), raw_vd, 0o600)
-        processed_vd = "%s" % (self.datasource.get_vendordata())
-        util.write_file(self._get_ipath('vendordata'), processed_vd, 0o600)
+        # processed vendor data is a Mime message, so write it as string.
+        processed_vd = str(self.datasource.get_vendordata())
+        if processed_vd is None:
+            processed_vd = ''
+        util.write_file(self._get_ipath('vendordata'), str(processed_vd), 0o600)
 
     def _default_handlers(self, opts=None):
         if opts is None:
-- 
cgit v1.2.3


From 8cd5d7b143f882d80d45b1c04bdde1949846d4f1 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 25 Feb 2015 19:40:33 -0500
Subject: move towards user-data being binary

UrlResponse: biggest change... make readurl return bytes, making user
             know what to do with it.
util: add load_tfile_or_url for loading text file or url
      as  read_file_or_url now returns bytes

ec2_utils: all meta-data is text, remove non-obvious string translations
DigitalOcean: adjust for ec2_utils

DataSourceGCE, DataSourceMAAS: user-data is binary other fields are text.
openstack.py: read paths without decoding to text.  This is ok as paths
              other than user-data are json, and load_json will handle

load_file still returns text, and that is what most things use.
---
 cloudinit/ec2_utils.py                              | 14 +++++++++++---
 cloudinit/sources/DataSourceDigitalOcean.py         |  8 ++++++--
 cloudinit/sources/DataSourceGCE.py                  | 21 ++++++++++++---------
 cloudinit/sources/DataSourceMAAS.py                 | 14 +++++++++++---
 cloudinit/sources/helpers/openstack.py              |  2 +-
 cloudinit/url_helper.py                             |  2 +-
 cloudinit/util.py                                   | 11 ++++++++---
 tests/unittests/helpers.py                          |  5 ++++-
 tests/unittests/test_datasource/test_configdrive.py | 15 ++++++++++-----
 tests/unittests/test_datasource/test_gce.py         |  2 +-
 tests/unittests/test_datasource/test_maas.py        |  8 ++++----
 tests/unittests/test_datasource/test_nocloud.py     | 14 +++++++-------
 tests/unittests/test_datasource/test_openstack.py   |  6 +++---
 tests/unittests/test_ec2_util.py                    |  2 +-
 .../test_handler/test_handler_apt_configure.py      | 12 ++++++------
 tests/unittests/test_pathprefix2dict.py             | 10 +++++-----
 16 files changed, 91 insertions(+), 55 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index e1ed4091..7cf99186 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -41,6 +41,10 @@ class MetadataLeafDecoder(object):
     def __call__(self, field, blob):
         if not blob:
             return blob
+        try:
+            blob = util.decode_binary(blob)
+        except UnicodeDecodeError:
+            return blob
         if self._maybe_json_object(blob):
             try:
                 # Assume it's json, unless it fails parsing...
@@ -69,6 +73,8 @@ class MetadataMaterializer(object):
     def _parse(self, blob):
         leaves = {}
         children = []
+        blob = util.decode_binary(blob)
+
         if not blob:
             return (leaves, children)
 
@@ -117,12 +123,12 @@ class MetadataMaterializer(object):
             child_url = url_helper.combine_url(base_url, c)
             if not child_url.endswith("/"):
                 child_url += "/"
-            child_blob = str(self._caller(child_url))
+            child_blob = self._caller(child_url)
             child_contents[c] = self._materialize(child_blob, child_url)
         leaf_contents = {}
         for (field, resource) in leaves.items():
             leaf_url = url_helper.combine_url(base_url, resource)
-            leaf_blob = self._caller(leaf_url).contents
+            leaf_blob = self._caller(leaf_url)
             leaf_contents[field] = self._leaf_decoder(field, leaf_blob)
         joined = {}
         joined.update(child_contents)
@@ -179,11 +185,13 @@ def get_instance_metadata(api_version='latest',
     caller = functools.partial(util.read_file_or_url,
                                ssl_details=ssl_details, timeout=timeout,
                                retries=retries)
+    def mcaller(url):
+        return caller(url).contents
 
     try:
         response = caller(md_url)
         materializer = MetadataMaterializer(response.contents,
-                                            md_url, caller,
+                                            md_url, mcaller,
                                             leaf_decoder=leaf_decoder)
         md = materializer.materialize()
         if not isinstance(md, (dict)):
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 76ddaa9d..5d47564d 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -54,9 +54,13 @@ class DataSourceDigitalOcean(sources.DataSource):
     def get_data(self):
         caller = functools.partial(util.read_file_or_url,
                                    timeout=self.timeout, retries=self.retries)
-        md = ec2_utils.MetadataMaterializer(str(caller(self.metadata_address)),
+
+        def mcaller(url):
+            return caller(url).contents
+
+        md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address),
                                             base_url=self.metadata_address,
-                                            caller=caller)
+                                            caller=mcaller)
 
         self.metadata = md.materialize()
 
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 6936c74e..608c07f1 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -53,15 +53,15 @@ class DataSourceGCE(sources.DataSource):
         # GCE metadata server requires a custom header since v1
         headers = {'X-Google-Metadata-Request': True}
 
-        # url_map: (our-key, path, required)
+        # url_map: (our-key, path, required, is_text)
         url_map = [
-            ('instance-id', 'instance/id', True),
-            ('availability-zone', 'instance/zone', True),
-            ('local-hostname', 'instance/hostname', True),
-            ('public-keys', 'project/attributes/sshKeys', False),
-            ('user-data', 'instance/attributes/user-data', False),
+            ('instance-id', 'instance/id', True, True),
+            ('availability-zone', 'instance/zone', True, True),
+            ('local-hostname', 'instance/hostname', True, True),
+            ('public-keys', 'project/attributes/sshKeys', False, True),
+            ('user-data', 'instance/attributes/user-data', False, False),
             ('user-data-encoding', 'instance/attributes/user-data-encoding',
-             False),
+             False, True),
         ]
 
         # if we cannot resolve the metadata server, then no point in trying
@@ -71,13 +71,16 @@ class DataSourceGCE(sources.DataSource):
 
         # iterate over url_map keys to get metadata items
         found = False
-        for (mkey, path, required) in url_map:
+        for (mkey, path, required, is_text) in url_map:
             try:
                 resp = url_helper.readurl(url=self.metadata_address + path,
                                           headers=headers)
                 if resp.code == 200:
                     found = True
-                    self.metadata[mkey] = resp.contents
+                    if is_text:
+                        self.metadata[mkey] = util.decode_binary(resp.contents)
+                    else:
+                        self.metadata[mkey] = resp.contents
                 else:
                     if required:
                         msg = "required url %s returned code %s. not GCE"
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 082cc58f..35c5b5e1 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -36,6 +36,8 @@ from cloudinit import util
 LOG = logging.getLogger(__name__)
 MD_VERSION = "2012-03-01"
 
+BINARY_FIELDS = ('user-data',)
+
 
 class DataSourceMAAS(sources.DataSource):
     """
@@ -185,7 +187,9 @@ def read_maas_seed_dir(seed_d):
     md = {}
     for fname in files:
         try:
-            md[fname] = util.load_file(os.path.join(seed_d, fname))
+            print("fname: %s / %s" % (fname, fname not in BINARY_FIELDS))
+            md[fname] = util.load_file(os.path.join(seed_d, fname),
+                                       decode=fname not in BINARY_FIELDS)
         except IOError as e:
             if e.errno != errno.ENOENT:
                 raise
@@ -218,6 +222,7 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
         'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
         'user-data': "%s/%s" % (base_url, 'user-data'),
     }
+
     md = {}
     for name in file_order:
         url = files.get(name)
@@ -238,7 +243,10 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
                                          timeout=timeout,
                                          ssl_details=ssl_details)
             if resp.ok():
-                md[name] = str(resp)
+                if name in BINARY_FIELDS:
+                    md[name] = resp.contents
+                else:
+                    md[name] = util.decode_binary(resp.contents)
             else:
                 LOG.warn(("Fetching from %s resulted in"
                           " an invalid http code %s"), url, resp.code)
@@ -263,7 +271,7 @@ def check_seed_contents(content, seed):
     if len(missing):
         raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
 
-    userdata = content.get('user-data', "")
+    userdata = content.get('user-data', b"")
     md = {}
     for (key, val) in content.items():
         if key == 'user-data':
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index 88c7a198..bd93d22f 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -327,7 +327,7 @@ class ConfigDriveReader(BaseReader):
         return os.path.join(*components)
 
     def _path_read(self, path):
-        return util.load_file(path)
+        return util.load_file(path, decode=False)
 
     def _fetch_available_versions(self):
         if self._versions is None:
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 62001dff..2d81a062 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -119,7 +119,7 @@ class UrlResponse(object):
 
     @property
     def contents(self):
-        return self._response.text
+        return self._response.content
 
     @property
     def url(self):
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 4fbdf0a9..efbc3c8d 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -739,6 +739,10 @@ def fetch_ssl_details(paths=None):
     return ssl_details
 
 
+def load_tfile_or_url(*args, **kwargs):
+    return(decode_binary(read_file_or_url(*args, **kwargs).contents))
+
+
 def read_file_or_url(url, timeout=5, retries=10,
                      headers=None, data=None, sec_between=1, ssl_details=None,
                      headers_cb=None, exception_cb=None):
@@ -750,7 +754,7 @@ def read_file_or_url(url, timeout=5, retries=10,
             LOG.warn("Unable to post data to file resource %s", url)
         file_path = url[len("file://"):]
         try:
-            contents = load_file(file_path)
+            contents = load_file(file_path, decode=False)
         except IOError as e:
             code = e.errno
             if e.errno == errno.ENOENT:
@@ -806,7 +810,7 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
         ud_url = "%s%s%s" % (base, "user-data", ext)
         md_url = "%s%s%s" % (base, "meta-data", ext)
 
-    md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
+    md_resp = load_tfile_or_url(md_url, timeout, retries, file_retries)
     md = None
     if md_resp.ok():
         md = load_yaml(md_resp.contents, default={})
@@ -815,6 +819,7 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
     ud = None
     if ud_resp.ok():
         ud = ud_resp.contents
+    print("returning %s (%s)" % (ud_resp.contents.__class__, ud_resp.contents))
 
     return (md, ud)
 
@@ -2030,7 +2035,7 @@ def pathprefix2dict(base, required=None, optional=None, delim=os.path.sep):
     ret = {}
     for f in required + optional:
         try:
-            ret[f] = load_file(base + delim + f, quiet=False)
+            ret[f] = load_file(base + delim + f, quiet=False, decode=False)
         except IOError as e:
             if e.errno != errno.ENOENT:
                 raise
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 7516bd02..24e1e881 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -288,7 +288,10 @@ def populate_dir(path, files):
         os.makedirs(path)
     for (name, content) in files.items():
         with open(os.path.join(path, name), "wb") as fp:
-            fp.write(content.encode('utf-8'))
+            if isinstance(content, six.binary_type):
+                fp.write(content)
+            else:
+                fp.write(content.encode('utf-8'))
             fp.close()
 
 
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index e28bdd84..83aca505 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -2,6 +2,7 @@ from copy import copy
 import json
 import os
 import shutil
+import six
 import tempfile
 
 try:
@@ -45,7 +46,7 @@ EC2_META = {
     'reservation-id': 'r-iru5qm4m',
     'security-groups': ['default']
 }
-USER_DATA = '#!/bin/sh\necho This is user data\n'
+USER_DATA = b'#!/bin/sh\necho This is user data\n'
 OSTACK_META = {
     'availability_zone': 'nova',
     'files': [{'content_path': '/content/0000', 'path': '/etc/foo.cfg'},
@@ -56,8 +57,8 @@ OSTACK_META = {
     'public_keys': {'mykey': PUBKEY},
     'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c'}
 
-CONTENT_0 = 'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = '# this is /etc/bar/bar.cfg\n'
+CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
+CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
 
 CFG_DRIVE_FILES_V2 = {
   'ec2/2009-04-04/meta-data.json': json.dumps(EC2_META),
@@ -346,8 +347,12 @@ def populate_dir(seed_dir, files):
         dirname = os.path.dirname(path)
         if not os.path.isdir(dirname):
             os.makedirs(dirname)
-        with open(path, "w") as fp:
+        if isinstance(content, six.text_type):
+            mode = "w"
+        else:
+            mode = "wb"
+
+        with open(path, mode) as fp:
             fp.write(content)
-            fp.close()
 
 # vi: ts=4 expandtab
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 6dd4b5ed..d28f3b08 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -32,7 +32,7 @@ GCE_META = {
     'instance/zone': 'foo/bar',
     'project/attributes/sshKeys': 'user:ssh-rsa AA2..+aRD0fyVw== root@server',
     'instance/hostname': 'server.project-foo.local',
-    'instance/attributes/user-data': '/bin/echo foo\n',
+    'instance/attributes/user-data': b'/bin/echo foo\n',
 }
 
 GCE_META_PARTIAL = {
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index d25e1adc..f109bb04 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -26,7 +26,7 @@ class TestMAASDataSource(TestCase):
 
         data = {'instance-id': 'i-valid01',
             'local-hostname': 'valid01-hostname',
-            'user-data': 'valid01-userdata',
+            'user-data': b'valid01-userdata',
             'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
 
         my_d = os.path.join(self.tmp, "valid")
@@ -46,7 +46,7 @@ class TestMAASDataSource(TestCase):
 
         data = {'instance-id': 'i-valid-extra',
             'local-hostname': 'valid-extra-hostname',
-            'user-data': 'valid-extra-userdata', 'foo': 'bar'}
+            'user-data': b'valid-extra-userdata', 'foo': 'bar'}
 
         my_d = os.path.join(self.tmp, "valid_extra")
         populate_dir(my_d, data)
@@ -103,7 +103,7 @@ class TestMAASDataSource(TestCase):
             'meta-data/instance-id': 'i-instanceid',
             'meta-data/local-hostname': 'test-hostname',
             'meta-data/public-keys': 'test-hostname',
-            'user-data': 'foodata',
+            'user-data': b'foodata',
             }
         valid_order = [
             'meta-data/local-hostname',
@@ -143,7 +143,7 @@ class TestMAASDataSource(TestCase):
             userdata, metadata = DataSourceMAAS.read_maas_seed_url(
                 my_seed, header_cb=my_headers_cb, version=my_ver)
 
-            self.assertEqual("foodata", userdata)
+            self.assertEqual(b"foodata", userdata)
             self.assertEqual(metadata['instance-id'],
                              valid['meta-data/instance-id'])
             self.assertEqual(metadata['local-hostname'],
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 4f967f58..85b4c25a 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -37,7 +37,7 @@ class TestNoCloudDataSource(TestCase):
 
     def test_nocloud_seed_dir(self):
         md = {'instance-id': 'IID', 'dsmode': 'local'}
-        ud = "USER_DATA_HERE"
+        ud = b"USER_DATA_HERE"
         populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
                      {'user-data': ud, 'meta-data': yaml.safe_dump(md)})
 
@@ -92,20 +92,20 @@ class TestNoCloudDataSource(TestCase):
         data = {
             'fs_label': None,
             'meta-data': yaml.safe_dump({'instance-id': 'IID'}),
-            'user-data': "USER_DATA_RAW",
+            'user-data': b"USER_DATA_RAW",
         }
 
         sys_cfg = {'datasource': {'NoCloud': data}}
         dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
         ret = dsrc.get_data()
-        self.assertEqual(dsrc.userdata_raw, "USER_DATA_RAW")
+        self.assertEqual(dsrc.userdata_raw, b"USER_DATA_RAW")
         self.assertEqual(dsrc.metadata.get('instance-id'), 'IID')
         self.assertTrue(ret)
 
     def test_nocloud_seed_with_vendordata(self):
         md = {'instance-id': 'IID', 'dsmode': 'local'}
-        ud = "USER_DATA_HERE"
-        vd = "THIS IS MY VENDOR_DATA"
+        ud = b"USER_DATA_HERE"
+        vd = b"THIS IS MY VENDOR_DATA"
 
         populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
                      {'user-data': ud, 'meta-data': yaml.safe_dump(md),
@@ -126,7 +126,7 @@ class TestNoCloudDataSource(TestCase):
 
     def test_nocloud_no_vendordata(self):
         populate_dir(os.path.join(self.paths.seed_dir, "nocloud"),
-                     {'user-data': "ud", 'meta-data': "instance-id: IID\n"})
+                     {'user-data': b"ud", 'meta-data': "instance-id: IID\n"})
 
         sys_cfg = {'datasource': {'NoCloud': {'fs_label': None}}}
 
@@ -134,7 +134,7 @@ class TestNoCloudDataSource(TestCase):
 
         dsrc = ds(sys_cfg=sys_cfg, distro=None, paths=self.paths)
         ret = dsrc.get_data()
-        self.assertEqual(dsrc.userdata_raw, "ud")
+        self.assertEqual(dsrc.userdata_raw, b"ud")
         self.assertFalse(dsrc.vendordata)
         self.assertTrue(ret)
 
diff --git a/tests/unittests/test_datasource/test_openstack.py b/tests/unittests/test_datasource/test_openstack.py
index 81ef1546..81411ced 100644
--- a/tests/unittests/test_datasource/test_openstack.py
+++ b/tests/unittests/test_datasource/test_openstack.py
@@ -49,7 +49,7 @@ EC2_META = {
     'public-ipv4': '0.0.0.1',
     'reservation-id': 'r-iru5qm4m',
 }
-USER_DATA = '#!/bin/sh\necho This is user data\n'
+USER_DATA = b'#!/bin/sh\necho This is user data\n'
 VENDOR_DATA = {
     'magic': '',
 }
@@ -63,8 +63,8 @@ OSTACK_META = {
     'public_keys': {'mykey': PUBKEY},
     'uuid': 'b0fa911b-69d4-4476-bbe2-1c92bff6535c',
 }
-CONTENT_0 = 'This is contents of /etc/foo.cfg\n'
-CONTENT_1 = '# this is /etc/bar/bar.cfg\n'
+CONTENT_0 = b'This is contents of /etc/foo.cfg\n'
+CONTENT_1 = b'# this is /etc/bar/bar.cfg\n'
 OS_FILES = {
     'openstack/latest/meta_data.json': json.dumps(OSTACK_META),
     'openstack/latest/user_data': USER_DATA,
diff --git a/tests/unittests/test_ec2_util.py b/tests/unittests/test_ec2_util.py
index 84aa002e..bd43accf 100644
--- a/tests/unittests/test_ec2_util.py
+++ b/tests/unittests/test_ec2_util.py
@@ -16,7 +16,7 @@ class TestEc2Util(helpers.HttprettyTestCase):
                         body='stuff',
                         status=200)
         userdata = eu.get_instance_userdata(self.VERSION)
-        self.assertEquals('stuff', userdata)
+        self.assertEquals('stuff', userdata.decode('utf-8'))
 
     @hp.activate
     def test_userdata_fetch_fail_not_found(self):
diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py
index d8fe9a4f..02cad8b2 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure.py
@@ -30,7 +30,7 @@ class TestAptProxyConfig(TestCase):
         self.assertTrue(os.path.isfile(self.pfile))
         self.assertFalse(os.path.isfile(self.cfile))
 
-        contents = str(util.read_file_or_url(self.pfile))
+        contents = util.load_tfile_or_url(self.pfile)
         self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
 
     def test_apt_http_proxy_written(self):
@@ -40,7 +40,7 @@ class TestAptProxyConfig(TestCase):
         self.assertTrue(os.path.isfile(self.pfile))
         self.assertFalse(os.path.isfile(self.cfile))
 
-        contents = str(util.read_file_or_url(self.pfile))
+        contents = util.load_tfile_or_url(self.pfile)
         self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
 
     def test_apt_all_proxy_written(self):
@@ -58,7 +58,7 @@ class TestAptProxyConfig(TestCase):
         self.assertTrue(os.path.isfile(self.pfile))
         self.assertFalse(os.path.isfile(self.cfile))
 
-        contents = str(util.read_file_or_url(self.pfile))
+        contents = util.load_tfile_or_url(self.pfile)
 
         for ptype, pval in values.items():
             self.assertTrue(self._search_apt_config(contents, ptype, pval))
@@ -74,7 +74,7 @@ class TestAptProxyConfig(TestCase):
         cc_apt_configure.apply_apt_config({'apt_proxy': "foo"},
                                           self.pfile, self.cfile)
         self.assertTrue(os.path.isfile(self.pfile))
-        contents = str(util.read_file_or_url(self.pfile))
+        contents = util.load_tfile_or_url(self.pfile)
         self.assertTrue(self._search_apt_config(contents, "http", "foo"))
 
     def test_config_written(self):
@@ -86,14 +86,14 @@ class TestAptProxyConfig(TestCase):
         self.assertTrue(os.path.isfile(self.cfile))
         self.assertFalse(os.path.isfile(self.pfile))
 
-        self.assertEqual(str(util.read_file_or_url(self.cfile)), payload)
+        self.assertEqual(util.load_tfile_or_url(self.cfile), payload)
 
     def test_config_replaced(self):
         util.write_file(self.pfile, "content doesnt matter")
         cc_apt_configure.apply_apt_config({'apt_config': "foo"},
                                           self.pfile, self.cfile)
         self.assertTrue(os.path.isfile(self.cfile))
-        self.assertEqual(str(util.read_file_or_url(self.cfile)), "foo")
+        self.assertEqual(util.load_tfile_or_url(self.cfile), "foo")
 
     def test_config_deleted(self):
         # if no 'apt_config' is provided, delete any previously written file
diff --git a/tests/unittests/test_pathprefix2dict.py b/tests/unittests/test_pathprefix2dict.py
index 7089bde6..38fd75b6 100644
--- a/tests/unittests/test_pathprefix2dict.py
+++ b/tests/unittests/test_pathprefix2dict.py
@@ -14,28 +14,28 @@ class TestPathPrefix2Dict(TestCase):
         self.addCleanup(shutil.rmtree, self.tmp)
 
     def test_required_only(self):
-        dirdata = {'f1': 'f1content', 'f2': 'f2content'}
+        dirdata = {'f1': b'f1content', 'f2': b'f2content'}
         populate_dir(self.tmp, dirdata)
 
         ret = util.pathprefix2dict(self.tmp, required=['f1', 'f2'])
         self.assertEqual(dirdata, ret)
 
     def test_required_missing(self):
-        dirdata = {'f1': 'f1content'}
+        dirdata = {'f1': b'f1content'}
         populate_dir(self.tmp, dirdata)
         kwargs = {'required': ['f1', 'f2']}
         self.assertRaises(ValueError, util.pathprefix2dict, self.tmp, **kwargs)
 
     def test_no_required_and_optional(self):
-        dirdata = {'f1': 'f1c', 'f2': 'f2c'}
+        dirdata = {'f1': b'f1c', 'f2': b'f2c'}
         populate_dir(self.tmp, dirdata)
 
         ret = util.pathprefix2dict(self.tmp, required=None,
-                                  optional=['f1', 'f2'])
+                                   optional=['f1', 'f2'])
         self.assertEqual(dirdata, ret)
 
     def test_required_and_optional(self):
-        dirdata = {'f1': 'f1c', 'f2': 'f2c'}
+        dirdata = {'f1': b'f1c', 'f2': b'f2c'}
         populate_dir(self.tmp, dirdata)
 
         ret = util.pathprefix2dict(self.tmp, required=['f1'], optional=['f2'])
-- 
cgit v1.2.3


From fa5a94ea415fc04c0fc50e9d3c08399f43aabcef Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 25 Feb 2015 19:47:07 -0500
Subject: UserDataProcessor: during include, do not convert to string

An include may reference gzipped or otherwise binary data,
so avoid converting the response to a string here.
---
 cloudinit/user_data.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index b11894ce..77f95abb 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -239,7 +239,7 @@ class UserDataProcessor(object):
                 if include_once_on and resp.ok():
                     util.write_file(include_once_fn, resp, mode=0o600)
                 if resp.ok():
-                    content = str(resp)
+                    content = resp
                 else:
                     LOG.warn(("Fetching from %s resulted in"
                               " a invalid http code of %s"),
-- 
cgit v1.2.3


From d136a3cf9aa409a0275cc884227971c1cd21720d Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 26 Feb 2015 13:51:35 -0500
Subject: avoid conversion to string in #include

---
 cloudinit/user_data.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 77f95abb..663a9048 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -237,9 +237,9 @@ class UserDataProcessor(object):
                 resp = util.read_file_or_url(include_url,
                                              ssl_details=self.ssl_details)
                 if include_once_on and resp.ok():
-                    util.write_file(include_once_fn, resp, mode=0o600)
+                    util.write_file(include_once_fn, resp.contents, mode=0o600)
                 if resp.ok():
-                    content = resp
+                    content = resp.contents
                 else:
                     LOG.warn(("Fetching from %s resulted in"
                               " a invalid http code of %s"),
-- 
cgit v1.2.3


From f0388cfffadc0596faeda9e11775597444bff25d Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 2 Mar 2015 15:41:16 -0500
Subject: pep8

---
 cloudinit/ec2_utils.py       | 1 +
 cloudinit/stages.py          | 3 ++-
 tests/unittests/test_data.py | 1 -
 3 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 7cf99186..37b92a83 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -185,6 +185,7 @@ def get_instance_metadata(api_version='latest',
     caller = functools.partial(util.read_file_or_url,
                                ssl_details=ssl_details, timeout=timeout,
                                retries=retries)
+
     def mcaller(url):
         return caller(url).contents
 
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 94fcf4cc..45d64823 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -346,7 +346,8 @@ class Init(object):
         processed_vd = str(self.datasource.get_vendordata())
         if processed_vd is None:
             processed_vd = ''
-        util.write_file(self._get_ipath('vendordata'), str(processed_vd), 0o600)
+        util.write_file(self._get_ipath('vendordata'), str(processed_vd),
+                        0o600)
 
     def _default_handlers(self, opts=None):
         if opts is None:
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 48475515..8fc280e4 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -58,7 +58,6 @@ def gzip_text(text):
     return contents.getvalue()
 
 
-
 # FIXME: these tests shouldn't be checking log output??
 # Weirddddd...
 class TestConsumeUserData(helpers.FilesystemMockingTestCase):
-- 
cgit v1.2.3


From 086fd973ea489dad5f680ce18fdacf61077fa82b Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 2 Mar 2015 15:48:42 -0500
Subject: url_helper.py: fix undefined variable

python2 scoping is different and running wait_for_url in python3
results in a use of undeclared variable 'e'.

$ python3 -c 'from cloudinit import url_helper; \
    url_helper.wait_for_url("o", max_wait=3,timeout=1, exception_cb=print)'
Traceback (most recent call last):
  File "<string>", line 1, in <module>
  File "cloudinit/url_helper.py", line 358, in wait_for_url
    exception_cb(msg=status_msg, exception=e)
---
 cloudinit/url_helper.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 2d81a062..0e65f431 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -321,7 +321,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
                     timeout = int((start_time + max_wait) - now)
 
             reason = ""
-            e = None
+            url_exc = None
             try:
                 if headers_cb is not None:
                     headers = headers_cb(url)
@@ -332,18 +332,20 @@ def wait_for_url(urls, max_wait=None, timeout=None,
                                    check_status=False)
                 if not response.contents:
                     reason = "empty response [%s]" % (response.code)
-                    e = UrlError(ValueError(reason),
-                                 code=response.code, headers=response.headers)
+                    url_exc = UrlError(ValueError(reason), code=response.code,
+                                       headers=response.headers)
                 elif not response.ok():
                     reason = "bad status code [%s]" % (response.code)
-                    e = UrlError(ValueError(reason),
-                                 code=response.code, headers=response.headers)
+                    url_exc = UrlError(ValueError(reason), code=response.code,
+                                       headers=response.headers)
                 else:
                     return url
             except UrlError as e:
                 reason = "request error [%s]" % e
+                url_exc = e
             except Exception as e:
                 reason = "unexpected error [%s]" % e
+                url_exc = e
 
             time_taken = int(time.time() - start_time)
             status_msg = "Calling '%s' failed [%s/%ss]: %s" % (url,
@@ -355,7 +357,7 @@ def wait_for_url(urls, max_wait=None, timeout=None,
                 # This can be used to alter the headers that will be sent
                 # in the future, for example this is what the MAAS datasource
                 # does.
-                exception_cb(msg=status_msg, exception=e)
+                exception_cb(msg=status_msg, exception=url_exc)
 
         if timeup(max_wait, start_time):
             break
-- 
cgit v1.2.3


From b2af44fb22719dc353bd867c2648e0dd5b2eec19 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 2 Mar 2015 15:50:43 -0500
Subject: util.py: remove 'print' debug statement

---
 cloudinit/util.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index efbc3c8d..039aa3f2 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -819,7 +819,6 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
     ud = None
     if ud_resp.ok():
         ud = ud_resp.contents
-    print("returning %s (%s)" % (ud_resp.contents.__class__, ud_resp.contents))
 
     return (md, ud)
 
-- 
cgit v1.2.3


From a934ae9543ccc9c13fbdedddcc04fa82853a7ec2 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 2 Mar 2015 15:56:15 -0500
Subject: get_cmdline_url: fix in python3 when calling

get_cmdline_url was passing a string to response.contents.startswith()
where response.contents is now bytes.

this changes it to convert input to text, and also to default to text.
---
 cloudinit/util.py               | 4 +++-
 tests/unittests/test__init__.py | 8 ++++----
 2 files changed, 7 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 039aa3f2..cc20305c 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -970,7 +970,7 @@ def get_fqdn_from_hosts(hostname, filename="/etc/hosts"):
 
 
 def get_cmdline_url(names=('cloud-config-url', 'url'),
-                    starts="#cloud-config", cmdline=None):
+                    starts=b"#cloud-config", cmdline=None):
     if cmdline is None:
         cmdline = get_cmdline()
 
@@ -986,6 +986,8 @@ def get_cmdline_url(names=('cloud-config-url', 'url'),
         return (None, None, None)
 
     resp = read_file_or_url(url)
+    # allow callers to pass starts as text when comparing to bytes contents
+    starts = encode_text(starts)
     if resp.ok() and resp.contents.startswith(starts):
         return (key, url, resp.contents)
 
diff --git a/tests/unittests/test__init__.py b/tests/unittests/test__init__.py
index 1a307e56..c32783a6 100644
--- a/tests/unittests/test__init__.py
+++ b/tests/unittests/test__init__.py
@@ -181,7 +181,7 @@ class TestCmdlineUrl(unittest.TestCase):
     def test_invalid_content(self):
         url = "http://example.com/foo"
         key = "mykey"
-        payload = "0"
+        payload = b"0"
         cmdline = "ro %s=%s bar=1" % (key, url)
 
         with mock.patch('cloudinit.url_helper.readurl',
@@ -194,13 +194,13 @@ class TestCmdlineUrl(unittest.TestCase):
     def test_valid_content(self):
         url = "http://example.com/foo"
         key = "mykey"
-        payload = "xcloud-config\nmydata: foo\nbar: wark\n"
+        payload = b"xcloud-config\nmydata: foo\nbar: wark\n"
         cmdline = "ro %s=%s bar=1" % (key, url)
 
         with mock.patch('cloudinit.url_helper.readurl',
                         return_value=url_helper.StringResponse(payload)):
             self.assertEqual(
-                util.get_cmdline_url(names=[key], starts="xcloud-config",
+                util.get_cmdline_url(names=[key], starts=b"xcloud-config",
                                      cmdline=cmdline),
                 (key, url, payload))
 
@@ -210,7 +210,7 @@ class TestCmdlineUrl(unittest.TestCase):
         cmdline = "ro %s=%s bar=1" % (key, url)
 
         with mock.patch('cloudinit.url_helper.readurl',
-                        return_value=url_helper.StringResponse('')):
+                        return_value=url_helper.StringResponse(b'')):
             self.assertEqual(
                 util.get_cmdline_url(names=["does-not-appear"],
                                      starts="#cloud-config", cmdline=cmdline),
-- 
cgit v1.2.3


From 72958f9c40f53c634d1eb7ef55547271e1972d2c Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 2 Mar 2015 16:34:46 -0500
Subject: DataSourceMAAS: fix oauthlib imports

In both python2 and python3, this throws
"'module' object has no attribute 'oauth1'":
  $ python3 -c 'import oauthlib; oauthlib.oauth1.Client("x")'
While this works fine:
  $ python3 -c 'import oauthlib.oauth1 as oauth1; oauth1.Client("x")'
---
 cloudinit/sources/DataSourceMAAS.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 35c5b5e1..6cc010b7 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -22,7 +22,7 @@ from __future__ import print_function
 
 from email.utils import parsedate
 import errno
-import oauthlib
+import oauthlib.oauth1 as oauth1
 import os
 import time
 
@@ -283,12 +283,12 @@ def check_seed_contents(content, seed):
 
 def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
                   timestamp=None):
-    client = oauthlib.oauth1.Client(
+    client = oauth1.Client(
         consumer_key,
         client_secret=consumer_secret,
         resource_owner_key=token_key,
         resource_owner_secret=token_secret,
-        signature_method=oauthlib.SIGNATURE_PLAINTEXT)
+        signature_method=oauth1.SIGNATURE_PLAINTEXT)
     uri, signed_headers, body = client.sign(url)
     return signed_headers
 
-- 
cgit v1.2.3


From 8663b57ebba7aa4f6916f53e74df4f890bbc8b9a Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 4 Mar 2015 10:46:27 +0000
Subject: Convert dmidecode values to sysfs names before looking for them.

dmidecode and /sys/class/dmi/id/* use different names for the same
information.  This modifies the logic in util.read_dmi_data to map from
dmidecode names to sysfs names before looking in sysfs.
---
 cloudinit/util.py            | 62 ++++++++++++++++++++++--------
 tests/unittests/test_util.py | 91 ++++++++++++++++++++++++--------------------
 2 files changed, 97 insertions(+), 56 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index cc20305c..f95e71c8 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -128,6 +128,28 @@ def fully_decoded_payload(part):
 # Path for DMI Data
 DMI_SYS_PATH = "/sys/class/dmi/id"
 
+# dmidecode and /sys/class/dmi/id/* use different names for the same value,
+# this allows us to refer to them by one canonical name
+DMIDECODE_TO_DMI_SYS_MAPPING = {
+    'baseboard-asset-tag': 'board_asset_tag',
+    'baseboard-manufacturer': 'board_vendor',
+    'baseboard-product-name': 'board_name',
+    'baseboard-serial-number': 'board_serial',
+    'baseboard-version': 'board_version',
+    'bios-release-date': 'bios_date',
+    'bios-vendor': 'bios_vendor',
+    'bios-version': 'bios_version',
+    'chassis-asset-tag': 'chassis_asset_tag',
+    'chassis-manufacturer': 'chassis_vendor',
+    'chassis-serial-number': 'chassis_serial',
+    'chassis-version': 'chassis_version',
+    'system-manufacturer': 'sys_vendor',
+    'system-product-name': 'product_name',
+    'system-serial-number': 'product_serial',
+    'system-uuid': 'product_uuid',
+    'system-version': 'product_version',
+}
+
 
 class ProcessExecutionError(IOError):
 
@@ -2103,24 +2125,26 @@ def _read_dmi_syspath(key):
     """
     Reads dmi data with from /sys/class/dmi/id
     """
-
-    dmi_key = "{0}/{1}".format(DMI_SYS_PATH, key)
-    LOG.debug("querying dmi data {0}".format(dmi_key))
+    if key not in DMIDECODE_TO_DMI_SYS_MAPPING:
+        return None
+    mapped_key = DMIDECODE_TO_DMI_SYS_MAPPING[key]
+    dmi_key_path = "{0}/{1}".format(DMI_SYS_PATH, mapped_key)
+    LOG.debug("querying dmi data {0}".format(dmi_key_path))
     try:
-        if not os.path.exists(dmi_key):
-            LOG.debug("did not find {0}".format(dmi_key))
+        if not os.path.exists(dmi_key_path):
+            LOG.debug("did not find {0}".format(dmi_key_path))
             return None
 
-        key_data = load_file(dmi_key)
+        key_data = load_file(dmi_key_path)
         if not key_data:
-            LOG.debug("{0} did not return any data".format(key))
+            LOG.debug("{0} did not return any data".format(dmi_key_path))
             return None
 
-        LOG.debug("dmi data {0} returned {0}".format(dmi_key, key_data))
+        LOG.debug("dmi data {0} returned {1}".format(dmi_key_path, key_data))
         return key_data.strip()
 
     except Exception as e:
-        logexc(LOG, "failed read of {0}".format(dmi_key), e)
+        logexc(LOG, "failed read of {0}".format(dmi_key_path), e)
         return None
 
 
@@ -2134,18 +2158,27 @@ def _call_dmidecode(key, dmidecode_path):
         (result, _err) = subp(cmd)
         LOG.debug("dmidecode returned '{0}' for '{0}'".format(result, key))
         return result
-    except OSError as _err:
+    except (IOError, OSError) as _err:
         LOG.debug('failed dmidecode cmd: {0}\n{0}'.format(cmd, _err.message))
         return None
 
 
 def read_dmi_data(key):
     """
-    Wrapper for reading DMI data. This tries to determine whether the DMI
-    Data can be read directly, otherwise it will fallback to using dmidecode.
+    Wrapper for reading DMI data.
+
+    This will do the following (returning the first that produces a
+    result):
+        1) Use a mapping to translate `key` from dmidecode naming to
+           sysfs naming and look in /sys/class/dmi/... for a value.
+        2) Use `key` as a sysfs key directly and look in /sys/class/dmi/...
+        3) Fall-back to passing `key` to `dmidecode --string`.
+
+    If all of the above fail to find a value, None will be returned.
     """
-    if os.path.exists(DMI_SYS_PATH):
-        return _read_dmi_syspath(key)
+    syspath_value = _read_dmi_syspath(key)
+    if syspath_value is not None:
+        return syspath_value
 
     dmidecode_path = which('dmidecode')
     if dmidecode_path:
@@ -2153,5 +2186,4 @@ def read_dmi_data(key):
 
     LOG.warn("did not find either path {0} or dmidecode command".format(
              DMI_SYS_PATH))
-
     return None
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 33c191a9..7da1f755 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -323,58 +323,67 @@ class TestMountinfoParsing(helpers.ResourceUsingTestCase):
 
 class TestReadDMIData(helpers.FilesystemMockingTestCase):
 
-    def _patchIn(self, root):
-        self.patchOS(root)
-        self.patchUtils(root)
+    def setUp(self):
+        super(TestReadDMIData, self).setUp()
+        self.new_root = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.new_root)
+        self.patchOS(self.new_root)
+        self.patchUtils(self.new_root)
 
-    def _write_key(self, key, content):
-        """Mocks the sys path found on Linux systems."""
-        new_root = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, new_root)
-        self._patchIn(new_root)
+    def _create_sysfs_parent_directory(self):
         util.ensure_dir(os.path.join('sys', 'class', 'dmi', 'id'))
 
+    def _create_sysfs_file(self, key, content):
+        """Mocks the sys path found on Linux systems."""
+        self._create_sysfs_parent_directory()
         dmi_key = "/sys/class/dmi/id/{0}".format(key)
         util.write_file(dmi_key, content)
 
-    def _no_syspath(self, key, content):
+    def _configure_dmidecode_return(self, key, content, error=None):
         """
         In order to test a missing sys path and call outs to dmidecode, this
         function fakes the results of dmidecode to test the results.
         """
-        new_root = tempfile.mkdtemp()
-        self.addCleanup(shutil.rmtree, new_root)
-        self._patchIn(new_root)
-        self.real_which = util.which
-        self.real_subp = util.subp
-
-        def _which(key):
-            return True
-        util.which = _which
-
-        def _cdd(_key, error=None):
+        def _dmidecode_subp(cmd):
+            if cmd[-1] != key:
+                raise util.ProcessExecutionError()
             return (content, error)
-        util.subp = _cdd
-
-    def test_key(self):
-        key_content = "TEST-KEY-DATA"
-        self._write_key("key", key_content)
-        self.assertEquals(key_content, util.read_dmi_data("key"))
-
-    def test_key_mismatch(self):
-        self._write_key("test", "ABC")
-        self.assertNotEqual("123", util.read_dmi_data("test"))
-
-    def test_no_key(self):
-        self._no_syspath(None, None)
-        self.assertFalse(util.read_dmi_data("key"))
-
-    def test_callout_dmidecode(self):
-        """test to make sure that dmidecode is used when no syspath"""
-        self._no_syspath("key", "stuff")
-        self.assertEquals("stuff", util.read_dmi_data("key"))
-        self._no_syspath("key", None)
-        self.assertFalse(None, util.read_dmi_data("key"))
+
+        self.patched_funcs.enter_context(
+            mock.patch.object(util, 'which', lambda _: True))
+        self.patched_funcs.enter_context(
+            mock.patch.object(util, 'subp', _dmidecode_subp))
+
+    def patch_mapping(self, new_mapping):
+        self.patched_funcs.enter_context(
+            mock.patch('cloudinit.util.DMIDECODE_TO_DMI_SYS_MAPPING',
+                       new_mapping))
+
+    def test_sysfs_used_with_key_in_mapping_and_file_on_disk(self):
+        self.patch_mapping({'mapped-key': 'mapped-value'})
+        expected_dmi_value = 'sys-used-correctly'
+        self._create_sysfs_file('mapped-value', expected_dmi_value)
+        self._configure_dmidecode_return('mapped-key', 'wrong-wrong-wrong')
+        self.assertEqual(expected_dmi_value, util.read_dmi_data('mapped-key'))
+
+    def test_dmidecode_used_if_no_sysfs_file_on_disk(self):
+        self.patch_mapping({})
+        self._create_sysfs_parent_directory()
+        expected_dmi_value = 'dmidecode-used'
+        self._configure_dmidecode_return('use-dmidecode', expected_dmi_value)
+        self.assertEqual(expected_dmi_value,
+                         util.read_dmi_data('use-dmidecode'))
+
+    def test_none_returned_if_neither_source_has_data(self):
+        self.patch_mapping({})
+        self._configure_dmidecode_return('key', 'value')
+        self.assertEqual(None, util.read_dmi_data('expect-fail'))
+
+    def test_none_returned_if_dmidecode_not_in_path(self):
+        self.patched_funcs.enter_context(
+            mock.patch.object(util, 'which', lambda _: False))
+        self.patch_mapping({})
+        self.assertEqual(None, util.read_dmi_data('expect-fail'))
 
 
 class TestMultiLog(helpers.FilesystemMockingTestCase):
-- 
cgit v1.2.3


From 30868baba637b45b654bd8a23719624a25b2a00b Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 4 Mar 2015 09:47:17 -0500
Subject: run emit_upstart only if upstart was init system

---
 cloudinit/config/cc_emit_upstart.py | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index 6d376184..e1b9a4c2 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -21,11 +21,32 @@
 import os
 
 from cloudinit.settings import PER_ALWAYS
+from cloudinit import log as logging
 from cloudinit import util
 
 frequency = PER_ALWAYS
 
 distros = ['ubuntu', 'debian']
+LOG = logging.getLogger(__name__)
+
+
+def is_upstart_system():
+    if not os.path.isfile("/sbin/initctl"):
+        LOG.debug(("Skipping module named %s,"
+                   " no /sbin/initctl located"), name)
+        return False
+
+    myenv = os.environ.copy()
+    if 'UPSTART_SESSION' in myenv:
+        del myenv['UPSTART_SESSION']
+    check_cmd = ['initctl', 'version']
+    try:
+        (out, err) = util.subp(check_cmd, env=myenv)
+        return 'upstart' in out
+    except util.ProcessExecutionError as e:
+        LOG.debug("'%s' returned '%s', not using upstart",
+                  ' '.join(check_cmd), e.exit_code)
+    return False
 
 
 def handle(name, _cfg, cloud, log, args):
@@ -34,10 +55,11 @@ def handle(name, _cfg, cloud, log, args):
         # Default to the 'cloud-config'
         # event for backwards compat.
         event_names = ['cloud-config']
-    if not os.path.isfile("/sbin/initctl"):
-        log.debug(("Skipping module named %s,"
-                   " no /sbin/initctl located"), name)
+
+    if not is_upstart_system():
+        log.debug("not upstart system, '%s' disabled")
         return
+
     cfgpath = cloud.paths.get_ipath_cur("cloud_config")
     for n in event_names:
         cmd = ['initctl', 'emit', str(n), 'CLOUD_CFG=%s' % cfgpath]
-- 
cgit v1.2.3


From df975abae42664bbd5fd56436eb2947e2a6f46f9 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 4 Mar 2015 10:10:11 -0500
Subject: add snappy module

---
 cloudinit/config/cc_snappy.py | 133 ++++++++++++++++++++++++++++++++++++++++++
 config/cloud.cfg              |   1 +
 2 files changed, 134 insertions(+)
 create mode 100644 cloudinit/config/cc_snappy.py

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
new file mode 100644
index 00000000..1588443f
--- /dev/null
+++ b/cloudinit/config/cc_snappy.py
@@ -0,0 +1,133 @@
+# vi: ts=4 expandtab
+#
+
+from cloudinit import log as logging
+from cloudinit import templater
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
+
+import glob
+import os
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+SNAPPY_ENV_PATH = "/writable/system-data/etc/snappy.env"
+
+CI_SNAPPY_CFG = {
+    'env_file_path': SNAPPY_ENV_PATH,
+    'packages': [],
+    'packages_dir': '/writable/user-data/cloud-init/click_packages',
+    'ssh_enabled': False
+}
+
+"""
+snappy:
+  ssh_enabled: True
+  packages:
+    - etcd
+    - {'name': 'pkg1', 'config': "wark"}
+"""
+
+
+def flatten(data, fill=None, tok="_", prefix='', recurse=True):
+    if fill is None:
+        fill = {}
+    for key, val in data.items():
+        key = key.replace("-", "_")
+        if isinstance(val, dict) and recurse:
+            flatten(val, fill, tok=tok, prefix=prefix + key + tok,
+                    recurse=recurse)
+        elif isinstance(key, str):
+            fill[prefix + key] = val
+    return fill
+
+
+def render2env(data, tok="_", prefix=''):
+    flat = flatten(data, tok=tok, prefix=prefix)
+    ret = ["%s='%s'" % (key, val) for key, val in flat.items()]
+    return '\n'.join(ret) + '\n'
+
+
+def install_package(pkg_name, config=None):
+    cmd = ["snappy", "install"]
+    if config:
+        if os.path.isfile(config):
+            cmd.append("--config-file=" + config)
+        else:
+            cmd.append("--config=" + config)
+    cmd.append(pkg_name)
+    util.subp(cmd)
+
+
+def install_packages(package_dir, packages):
+    local_pkgs = glob.glob(os.path.sep.join([package_dir, '*.click']))
+    LOG.debug("installing local packages %s" % local_pkgs)
+    if local_pkgs:
+        for pkg in local_pkgs:
+            cfg = pkg.replace(".click", ".config")
+            if not os.path.isfile(cfg):
+                cfg = None
+            install_package(pkg, config=cfg)
+
+    LOG.debug("installing click packages")
+    if packages:
+        for pkg in packages:
+            if not pkg:
+                continue
+            if isinstance(pkg, str):
+                name = pkg
+                config = None
+            elif pkg:
+                name = pkg.get('name', pkg)
+                config = pkg.get('config')
+            install_package(pkg_name=name, config=config)
+
+
+def disable_enable_ssh(enabled):
+    LOG.debug("setting enablement of ssh to: %s", enabled)
+    # do something here that would enable or disable
+    not_to_be_run = "/etc/ssh/sshd_not_to_be_run"
+    if enabled:
+        util.del_file(not_to_be_run)
+        # this is an indempotent operation
+        util.subp(["systemctl", "start", "ssh"])
+    else:
+        # this is an indempotent operation
+        util.subp(["systemctl", "stop", "ssh"])
+        util.write_file(not_to_be_run, "cloud-init\n")
+
+
+def handle(name, cfg, cloud, log, args):
+    mycfg = cfg.get('snappy', {'ssh_enabled': False})
+
+    if not mycfg:
+        LOG.debug("%s: no top level found", name)
+        return
+
+    # take out of 'cfg' the cfg keys that cloud-init uses, so
+    # mycfg has only content external to cloud-init.
+    ci_cfg = CI_SNAPPY_CFG.copy()
+    for i in ci_cfg:
+        if i in mycfg:
+            ci_cfg[i] = mycfg[i]
+            del mycfg[i]
+
+    # render the flattened environment variable style file to a path
+    # this was useful for systemd config environment files.  given:
+    # snappy:
+    #   foo:
+    #     bar: wark
+    #     cfg1:
+    #       key1: value
+    # you get the following in env_file_path.
+    #   foo_bar=wark
+    #   foo_cfg1_key1=value
+    contents = render2env(mycfg)
+    header = '# for internal use only, not a guaranteed interface\n'
+    util.write_file(ci_cfg['env_file_path'], header + render2env(mycfg))
+
+    install_packages(ci_cfg['packages_dir'],
+                     ci_cfg['packages'])
+
+    disable_enable_ssh(ci_cfg.get('ssh_enabled', False))
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 200050d3..e96e1781 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -48,6 +48,7 @@ cloud_config_modules:
  - ssh-import-id
  - locale
  - set-passwords
+ - snappy
  - grub-dpkg
  - apt-pipelining
  - apt-configure
-- 
cgit v1.2.3


From 28f8d44533ef263f7a35a9f11e328dd9035fb82a Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 4 Mar 2015 15:57:41 +0000
Subject: Use more consistent logging invocation.

---
 cloudinit/util.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index f95e71c8..b6065410 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2129,22 +2129,22 @@ def _read_dmi_syspath(key):
         return None
     mapped_key = DMIDECODE_TO_DMI_SYS_MAPPING[key]
     dmi_key_path = "{0}/{1}".format(DMI_SYS_PATH, mapped_key)
-    LOG.debug("querying dmi data {0}".format(dmi_key_path))
+    LOG.debug("querying dmi data %s", dmi_key_path)
     try:
         if not os.path.exists(dmi_key_path):
-            LOG.debug("did not find {0}".format(dmi_key_path))
+            LOG.debug("did not find %s", dmi_key_path)
             return None
 
         key_data = load_file(dmi_key_path)
         if not key_data:
-            LOG.debug("{0} did not return any data".format(dmi_key_path))
+            LOG.debug("%s did not return any data", dmi_key_path)
             return None
 
-        LOG.debug("dmi data {0} returned {1}".format(dmi_key_path, key_data))
+        LOG.debug("dmi data %s returned %s", dmi_key_path, key_data)
         return key_data.strip()
 
     except Exception as e:
-        logexc(LOG, "failed read of {0}".format(dmi_key_path), e)
+        logexc(LOG, "failed read of %s", dmi_key_path, e)
         return None
 
 
@@ -2156,10 +2156,10 @@ def _call_dmidecode(key, dmidecode_path):
     try:
         cmd = [dmidecode_path, "--string", key]
         (result, _err) = subp(cmd)
-        LOG.debug("dmidecode returned '{0}' for '{0}'".format(result, key))
+        LOG.debug("dmidecode returned '%s' for '%s'", result, key)
         return result
     except (IOError, OSError) as _err:
-        LOG.debug('failed dmidecode cmd: {0}\n{0}'.format(cmd, _err.message))
+        LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err.message)
         return None
 
 
@@ -2184,6 +2184,6 @@ def read_dmi_data(key):
     if dmidecode_path:
         return _call_dmidecode(key, dmidecode_path)
 
-    LOG.warn("did not find either path {0} or dmidecode command".format(
-             DMI_SYS_PATH))
+    LOG.warn("did not find either path %s or dmidecode command",
+             DMI_SYS_PATH)
     return None
-- 
cgit v1.2.3


From 014468ea3fb36e81a3e5a6fc593ce91571c1495f Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 4 Mar 2015 17:20:22 +0000
Subject: Fix invalid format string in CloudSigma logging.

---
 cloudinit/sources/DataSourceCloudSigma.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 76597116..f8f94759 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -59,7 +59,7 @@ class DataSourceCloudSigma(sources.DataSource):
             LOG.warn("failed to get hypervisor product name via dmi data")
             return False
         else:
-            LOG.debug("detected hypervisor as {}".format(sys_product_name))
+            LOG.debug("detected hypervisor as %s", sys_product_name)
             return 'cloudsigma' in sys_product_name.lower()
 
         LOG.warn("failed to query dmi data for system product name")
-- 
cgit v1.2.3


From 5eb2aab5d010e7b8d5e4146959e50f2a9f67d504 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 4 Mar 2015 17:20:48 +0000
Subject: Add util.message_from_string to wrap email.message_from_string.

This is to work around the fact that email.message_from_string uses
cStringIO in Python 2.6, which can't handle Unicode.
---
 cloudinit/user_data.py       | 4 +---
 cloudinit/util.py            | 7 +++++++
 tests/unittests/test_util.py | 7 +++++++
 3 files changed, 15 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 663a9048..eb3c7336 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -22,8 +22,6 @@
 
 import os
 
-import email
-
 from email.mime.base import MIMEBase
 from email.mime.multipart import MIMEMultipart
 from email.mime.nonmultipart import MIMENonMultipart
@@ -338,7 +336,7 @@ def convert_string(raw_data, headers=None):
         headers = {}
     data = util.decode_binary(util.decomp_gzip(raw_data))
     if "mime-version:" in data[0:4096].lower():
-        msg = email.message_from_string(data)
+        msg = util.message_from_string(data)
         for (key, val) in headers.items():
             _replace_header(msg, key, val)
     else:
diff --git a/cloudinit/util.py b/cloudinit/util.py
index b6065410..971c1c2d 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -23,6 +23,7 @@
 import contextlib
 import copy as obj_copy
 import ctypes
+import email
 import errno
 import glob
 import grp
@@ -2187,3 +2188,9 @@ def read_dmi_data(key):
     LOG.warn("did not find either path %s or dmidecode command",
              DMI_SYS_PATH)
     return None
+
+
+def message_from_string(string):
+    if sys.version_info[:2] < (2, 7):
+        return email.message_from_file(six.StringIO(string))
+    return email.message_from_string(string)
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 7da1f755..1619b5d2 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -452,4 +452,11 @@ class TestMultiLog(helpers.FilesystemMockingTestCase):
         util.multi_log('message', log=log, log_level=log_level)
         self.assertEqual((log_level, mock.ANY), log.log.call_args[0])
 
+
+class TestMessageFromString(helpers.TestCase):
+
+    def test_unicode_not_messed_up(self):
+        roundtripped = util.message_from_string(u'\n').as_string()
+        self.assertNotIn('\x00', roundtripped)
+
 # vi: ts=4 expandtab
-- 
cgit v1.2.3


From ac445690ab7a69ac6e9c74b136de9968b0c839df Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 4 Mar 2015 12:42:34 -0500
Subject: fix logging perms with list rather than single

---
 cloudinit/settings.py         |  2 +-
 cloudinit/stages.py           | 21 +++++++++++++++------
 doc/examples/cloud-config.txt |  2 ++
 3 files changed, 18 insertions(+), 7 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index 5efcb0b0..b61e5613 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -47,7 +47,7 @@ CFG_BUILTIN = {
     ],
     'def_log_file': '/var/log/cloud-init.log',
     'log_cfgs': [],
-    'syslog_fix_perms': 'syslog:adm',
+    'syslog_fix_perms': ['syslog:adm', 'root:adm'],
     'system_info': {
         'paths': {
             'cloud_dir': '/var/lib/cloud',
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 45d64823..d28e765b 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -148,16 +148,25 @@ class Init(object):
     def _initialize_filesystem(self):
         util.ensure_dirs(self._initial_subdirs())
         log_file = util.get_cfg_option_str(self.cfg, 'def_log_file')
-        perms = util.get_cfg_option_str(self.cfg, 'syslog_fix_perms')
         if log_file:
             util.ensure_file(log_file)
-            if perms:
-                u, g = util.extract_usergroup(perms)
+            perms = self.cfg.get('syslog_fix_perms')
+            if not perms:
+                perms = {}
+            if not isinstance(perms, list):
+                perms = [perms]
+
+            error = None
+            for perm in perms:
+                u, g = util.extract_usergroup(perm)
                 try:
                     util.chownbyname(log_file, u, g)
-                except OSError:
-                    util.logexc(LOG, "Unable to change the ownership of %s to "
-                                "user %s, group %s", log_file, u, g)
+                    return
+                except OSError as e:
+                    error = e
+
+            LOG.warn("Failed changing perms on '%s'. tried: %s. %s",
+                     log_file, ','.join(perms), error)
 
     def read_cfg(self, extra_fns=None):
         # None check so that we don't keep on re-loading if empty
diff --git a/doc/examples/cloud-config.txt b/doc/examples/cloud-config.txt
index 1c59c2cf..1236796c 100644
--- a/doc/examples/cloud-config.txt
+++ b/doc/examples/cloud-config.txt
@@ -536,6 +536,8 @@ timezone: US/Eastern
 # 
 # to remedy this situation, 'def_log_file' can be set to a filename
 # and syslog_fix_perms to a string containing "<user>:<group>"
+# if syslog_fix_perms is a list, it will iterate through and use the
+# first pair that does not raise error.
 #
 # the default values are '/var/log/cloud-init.log' and 'syslog:adm'
 # the value of 'def_log_file' should match what is configured in logging
-- 
cgit v1.2.3


From 2db45b26d2e5412aa55b33ec924afce7c0dbd12c Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 4 Mar 2015 14:49:44 -0500
Subject: locale: make able to be turned off

---
 cloudinit/config/cc_locale.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_locale.py b/cloudinit/config/cc_locale.py
index 6feaae9d..bbe5fcae 100644
--- a/cloudinit/config/cc_locale.py
+++ b/cloudinit/config/cc_locale.py
@@ -27,9 +27,9 @@ def handle(name, cfg, cloud, log, args):
     else:
         locale = util.get_cfg_option_str(cfg, "locale", cloud.get_locale())
 
-    if not locale:
-        log.debug(("Skipping module named %s, "
-                   "no 'locale' configuration found"), name)
+    if util.is_false(locale):
+        log.debug("Skipping module named %s, disabled by config: %s",
+                  name, locale)
         return
 
     log.debug("Setting locale to %s", locale)
-- 
cgit v1.2.3


From 0ce85f53c15bce21e65a419fe71127c6d94064aa Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 4 Mar 2015 14:49:59 -0500
Subject: grub-dpkg: allow to be disabled

---
 cloudinit/config/cc_grub_dpkg.py | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index e3219e81..456597af 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -25,15 +25,20 @@ from cloudinit import util
 distros = ['ubuntu', 'debian']
 
 
-def handle(_name, cfg, _cloud, log, _args):
-    idevs = None
-    idevs_empty = None
+def handle(name, cfg, _cloud, log, _args):
 
-    if "grub-dpkg" in cfg:
-        idevs = util.get_cfg_option_str(cfg["grub-dpkg"],
-            "grub-pc/install_devices", None)
-        idevs_empty = util.get_cfg_option_str(cfg["grub-dpkg"],
-            "grub-pc/install_devices_empty", None)
+    mycfg = cfg.get("grub_dpkg", cfg.get("grub-dpkg", {}))
+    if not mycfg:
+        mycfg = {}
+
+    enabled = mycfg.get('enabled', True)
+    if util.is_false(enabled):
+        log.debug("%s disabled by config grub_dpkg/enabled=%s", name, enabled)
+        return
+
+    idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
+    idevs_empty = util.get_cfg_option_str(mycfg,
+        "grub-pc/install_devices_empty", None)
 
     if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
             (os.path.exists("/dev/xvda1")
-- 
cgit v1.2.3


From e7cce1a06429813b8d2acc87e6609671d39a3254 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 4 Mar 2015 15:51:46 -0500
Subject: apt_configure: allow disabling

---
 cloudinit/config/cc_apt_configure.py | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index de72903f..2c51d116 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -51,6 +51,10 @@ EXPORT_GPG_KEYID = """
 
 
 def handle(name, cfg, cloud, log, _args):
+    if util.is_false(cfg.get('apt_configure_enabled', True)):
+        log.debug("Skipping module named %s, disabled by config.", name)
+        return
+
     release = get_release()
     mirrors = find_apt_mirror_info(cloud, cfg)
     if not mirrors or "primary" not in mirrors:
-- 
cgit v1.2.3


From 46da1b83fba8d1e70dc58dbbf18697216b1eb1e3 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 5 Mar 2015 11:18:45 -0500
Subject: fixes bug: https://launchpad.net/bugs/1428495 snappy: disable by
 default

this does 2 things actually
a.) disables snappy by default, and adds checks to filesystem to enable it
b.) removes the 'render2env' that was mostly spike code.
---
 cloudinit/config/cc_snappy.py | 74 ++++++++++++++++---------------------------
 1 file changed, 27 insertions(+), 47 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 1588443f..32fbc9f6 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -14,15 +14,16 @@ LOG = logging.getLogger(__name__)
 frequency = PER_INSTANCE
 SNAPPY_ENV_PATH = "/writable/system-data/etc/snappy.env"
 
-CI_SNAPPY_CFG = {
-    'env_file_path': SNAPPY_ENV_PATH,
+BUILTIN_CFG = {
     'packages': [],
     'packages_dir': '/writable/user-data/cloud-init/click_packages',
-    'ssh_enabled': False
+    'ssh_enabled': False,
+    'system_snappy': "auto"
 }
 
 """
 snappy:
+  system_snappy: auto
   ssh_enabled: True
   packages:
     - etcd
@@ -30,25 +31,6 @@ snappy:
 """
 
 
-def flatten(data, fill=None, tok="_", prefix='', recurse=True):
-    if fill is None:
-        fill = {}
-    for key, val in data.items():
-        key = key.replace("-", "_")
-        if isinstance(val, dict) and recurse:
-            flatten(val, fill, tok=tok, prefix=prefix + key + tok,
-                    recurse=recurse)
-        elif isinstance(key, str):
-            fill[prefix + key] = val
-    return fill
-
-
-def render2env(data, tok="_", prefix=''):
-    flat = flatten(data, tok=tok, prefix=prefix)
-    ret = ["%s='%s'" % (key, val) for key, val in flat.items()]
-    return '\n'.join(ret) + '\n'
-
-
 def install_package(pkg_name, config=None):
     cmd = ["snappy", "install"]
     if config:
@@ -98,34 +80,32 @@ def disable_enable_ssh(enabled):
         util.write_file(not_to_be_run, "cloud-init\n")
 
 
-def handle(name, cfg, cloud, log, args):
-    mycfg = cfg.get('snappy', {'ssh_enabled': False})
+def system_is_snappy():
+    # channel.ini is configparser loadable.
+    # snappy will move to using /etc/system-image/config.d/*.ini
+    # this is certainly not a perfect test, but good enough for now.
+    content = util.load_file("/etc/system-image/channel.ini")
+    if 'ubuntu-core' in content.lower():
+        return True
+    if os.path.isdir("/etc/system-image/config.d/"):
+        return True
+    return False
+
 
-    if not mycfg:
-        LOG.debug("%s: no top level found", name)
+def handle(name, cfg, cloud, log, args):
+    cfgin = cfg.get('snappy')
+    if not cfgin:
+        cfgin = {}
+    mycfg = util.mergemanydict([BUILTIN_CFG, cfgin])
+
+    sys_snappy = mycfg.get("system_snappy", "auto")
+    if util.is_false(sys_snappy):
+        LOG.debug("%s: System is not snappy. disabling", name)
         return
 
-    # take out of 'cfg' the cfg keys that cloud-init uses, so
-    # mycfg has only content external to cloud-init.
-    ci_cfg = CI_SNAPPY_CFG.copy()
-    for i in ci_cfg:
-        if i in mycfg:
-            ci_cfg[i] = mycfg[i]
-            del mycfg[i]
-
-    # render the flattened environment variable style file to a path
-    # this was useful for systemd config environment files.  given:
-    # snappy:
-    #   foo:
-    #     bar: wark
-    #     cfg1:
-    #       key1: value
-    # you get the following in env_file_path.
-    #   foo_bar=wark
-    #   foo_cfg1_key1=value
-    contents = render2env(mycfg)
-    header = '# for internal use only, not a guaranteed interface\n'
-    util.write_file(ci_cfg['env_file_path'], header + render2env(mycfg))
+    if sys_snappy.lower() == "auto" and not(system_is_snappy()):
+        LOG.debug("%s: 'auto' mode, and system not snappy", name)
+        return
 
     install_packages(ci_cfg['packages_dir'],
                      ci_cfg['packages'])
-- 
cgit v1.2.3


From 692078e75a3f8af92a0151ad30b6a4ecc64b4b35 Mon Sep 17 00:00:00 2001
From: Oleg Strikov <oleg.strikov@canonical.com>
Date: Thu, 5 Mar 2015 20:26:10 +0300
Subject: DataSourceMAAS: generate oauth headers with adjusted timestamp in
 case of clock skew

This functionality has been introduced to fix LP: #978127, but was lost
while migrating cloud-init to python3.
---
 cloudinit/sources/DataSourceMAAS.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 6cc010b7..9f9cf3ab 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -288,7 +288,8 @@ def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
         client_secret=consumer_secret,
         resource_owner_key=token_key,
         resource_owner_secret=token_secret,
-        signature_method=oauth1.SIGNATURE_PLAINTEXT)
+        signature_method=oauth1.SIGNATURE_PLAINTEXT,
+        timestamp=timestamp)
     uri, signed_headers, body = client.sign(url)
     return signed_headers
 
-- 
cgit v1.2.3


From d05f1b00e2498343c03ba2de543990fffde8a02f Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 5 Mar 2015 12:26:26 -0500
Subject: do not raise exception on non-existent channel.ini file

---
 cloudinit/config/cc_snappy.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 32fbc9f6..8d73dca3 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -84,7 +84,7 @@ def system_is_snappy():
     # channel.ini is configparser loadable.
     # snappy will move to using /etc/system-image/config.d/*.ini
     # this is certainly not a perfect test, but good enough for now.
-    content = util.load_file("/etc/system-image/channel.ini")
+    content = util.load_file("/etc/system-image/channel.ini", quiet=True)
     if 'ubuntu-core' in content.lower():
         return True
     if os.path.isdir("/etc/system-image/config.d/"):
-- 
cgit v1.2.3


From c501a37e94b9601740fd7b3dcbcc4cce9136d7f4 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 5 Mar 2015 13:16:28 -0500
Subject: fixes from testing

---
 cloudinit/config/cc_snappy.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 8d73dca3..133336d4 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -96,9 +96,9 @@ def handle(name, cfg, cloud, log, args):
     cfgin = cfg.get('snappy')
     if not cfgin:
         cfgin = {}
-    mycfg = util.mergemanydict([BUILTIN_CFG, cfgin])
+    mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
 
-    sys_snappy = mycfg.get("system_snappy", "auto")
+    sys_snappy = str(mycfg.get("system_snappy", "auto"))
     if util.is_false(sys_snappy):
         LOG.debug("%s: System is not snappy. disabling", name)
         return
@@ -107,7 +107,7 @@ def handle(name, cfg, cloud, log, args):
         LOG.debug("%s: 'auto' mode, and system not snappy", name)
         return
 
-    install_packages(ci_cfg['packages_dir'],
-                     ci_cfg['packages'])
+    install_packages(mycfg['packages_dir'],
+                     mycfg['packages'])
 
-    disable_enable_ssh(ci_cfg.get('ssh_enabled', False))
+    disable_enable_ssh(mycfg.get('ssh_enabled', False))
-- 
cgit v1.2.3


From ec23db8b0450c8f76305295bea5ec3178dd5f176 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 10 Mar 2015 16:18:20 -0400
Subject: DataSourceMAAS: remove debug statement

---
 cloudinit/sources/DataSourceMAAS.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 9f9cf3ab..53f097e6 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -187,7 +187,6 @@ def read_maas_seed_dir(seed_d):
     md = {}
     for fname in files:
         try:
-            print("fname: %s / %s" % (fname, fname not in BINARY_FIELDS))
             md[fname] = util.load_file(os.path.join(seed_d, fname),
                                        decode=fname not in BINARY_FIELDS)
         except IOError as e:
-- 
cgit v1.2.3


From 5f2b73c8ae292cf400b811f3b3f808be6019a60c Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 10 Mar 2015 17:04:59 -0400
Subject: DataSourceMAAS: fix timestamp error in oauthlib

oddly enough, the timestamp you pass into oauthlib must be None
or a string.  If not, it raises ValueError:
  Only unicode objects are escapable. Got 1426021488 of type <class 'int'>
---
 cloudinit/sources/DataSourceMAAS.py | 5 +++++
 1 file changed, 5 insertions(+)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 53f097e6..c1a0eb61 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -282,6 +282,11 @@ def check_seed_contents(content, seed):
 
 def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
                   timestamp=None):
+    if timestamp:
+        timestamp = str(timestamp)
+    else:
+        timestamp = None
+
     client = oauth1.Client(
         consumer_key,
         client_secret=consumer_secret,
-- 
cgit v1.2.3


From 31a8aab92656279b141a9c29e484c4895bde15d3 Mon Sep 17 00:00:00 2001
From: Oleg Strikov <oleg.strikov@canonical.com>
Date: Wed, 11 Mar 2015 20:22:54 +0300
Subject: userdata-handlers: python3-related fixes on do-not-process-this-part
 path

Cloud-init crashed when received multipart userdata object with
'application/octet-stream' part or some other 'application/*' part
except archived ones (x-gzip and friends). These parts are not
processed by cloud-init and result only in a message in the log.
We used some non-python3-friendly techniques while generating
this log message which was a reason for the crash.
---
 cloudinit/handlers/__init__.py | 24 ++++++++++++++++++------
 tests/unittests/test_data.py   | 18 ++++++++++++++++++
 2 files changed, 36 insertions(+), 6 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 6b7abbcd..d62fcd19 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -163,12 +163,19 @@ def walker_handle_handler(pdata, _ctype, _filename, payload):
 
 
 def _extract_first_or_bytes(blob, size):
-    # Extract the first line upto X bytes or X bytes from more than the
-    # first line if the first line does not contain enough bytes
-    first_line = blob.split("\n", 1)[0]
-    if len(first_line) >= size:
-        start = first_line[:size]
-    else:
+    # Extract the first line or upto X symbols for text objects
+    # Extract first X bytes for binary objects
+    try:
+        if isinstance(blob, six.string_types):
+            start = blob.split("\n", 1)[0]
+        else:
+            # We want to avoid decoding the whole blob (it might be huge)
+            # By taking 4*size bytes we have a guarantee to decode size utf8 chars
+            start = blob[:4*size].decode(errors='ignore').split("\n", 1)[0]
+        if len(start) >= size:
+            start = start[:size]
+    except UnicodeDecodeError:
+        # Bytes array doesn't contain a text object -- return chunk of raw bytes
         start = blob[0:size]
     return start
 
@@ -183,6 +190,11 @@ def _escape_string(text):
         except TypeError:
             # Give up...
             pass
+    except AttributeError:
+        # We're in Python3 and received blob as text
+        # No escaping is needed because bytes are printed
+        # as 'b\xAA\xBB' automatically in Python3
+        pass
     return text
 
 
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 8fc280e4..4f24e2dd 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -13,6 +13,7 @@ except ImportError:
 
 from six import BytesIO, StringIO
 
+from email import encoders
 from email.mime.application import MIMEApplication
 from email.mime.base import MIMEBase
 from email.mime.multipart import MIMEMultipart
@@ -492,6 +493,23 @@ c: 4
             mock.call(ci.paths.get_ipath("cloud_config"), "", 0o600),
             ])
 
+    def test_mime_application_octet_stream(self):
+        """Mime message of type application/octet-stream is ignored but shows warning."""
+        ci = stages.Init()
+        message = MIMEBase("application", "octet-stream")
+        message.set_payload(b'\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc\xbf')
+        encoders.encode_base64(message)
+        ci.datasource = FakeDataSource(message.as_string().encode())
+
+        with mock.patch('cloudinit.util.write_file') as mockobj:
+            log_file = self.capture_log(logging.WARNING)
+            ci.fetch()
+            ci.consume_data()
+            self.assertIn(
+                "Unhandled unknown content-type (application/octet-stream)",
+                log_file.getvalue())
+        mockobj.assert_called_once_with(
+            ci.paths.get_ipath("cloud_config"), "", 0o600)
 
 class TestUDProcess(helpers.ResourceUsingTestCase):
 
-- 
cgit v1.2.3


From c8a7b446de26c6bc19df1b8bb7d2b39cb9487749 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 13 Mar 2015 10:18:12 +0000
Subject: Write and read bytes to/from the SmartOS serial console.

---
 cloudinit/sources/DataSourceSmartOS.py          |  5 +++--
 tests/unittests/test_datasource/test_smartos.py | 15 ++++++++++-----
 2 files changed, 13 insertions(+), 7 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 9d48beab..896fde3f 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -319,7 +319,8 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
         return False
 
     ser = get_serial(seed_device, seed_timeout)
-    ser.write("GET %s\n" % noun.rstrip())
+    request_line = "GET %s\n" % noun.rstrip()
+    ser.write(request_line.encode('ascii'))
     status = str(ser.readline()).rstrip()
     response = []
     eom_found = False
@@ -329,7 +330,7 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
         return default
 
     while not eom_found:
-        m = ser.readline()
+        m = ser.readline().decode('ascii')
         if m.rstrip() == ".":
             eom_found = True
         else:
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 8b62b1b1..cb0ab984 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -36,6 +36,8 @@ import tempfile
 import stat
 import uuid
 
+import six
+
 
 MOCK_RETURNS = {
     'hostname': 'test-host',
@@ -78,24 +80,27 @@ class MockSerial(object):
         return True
 
     def write(self, line):
-        line = line.replace('GET ', '')
+        if not isinstance(line, six.binary_type):
+            raise TypeError("Should be writing binary lines.")
+        line = line.decode('ascii').replace('GET ', '')
         self.last = line.rstrip()
 
     def readline(self):
         if self.new:
             self.new = False
             if self.last in self.mockdata:
-                return 'SUCCESS\n'
+                line = 'SUCCESS\n'
             else:
-                return 'NOTFOUND %s\n' % self.last
+                line = 'NOTFOUND %s\n' % self.last
 
-        if self.last in self.mockdata:
+        elif self.last in self.mockdata:
             if not self.mocked_out:
                 self.mocked_out = [x for x in self._format_out()]
 
             if len(self.mocked_out) > self.count:
                 self.count += 1
-                return self.mocked_out[self.count - 1]
+                line = self.mocked_out[self.count - 1]
+        return line.encode('ascii')
 
     def _format_out(self):
         if self.last in self.mockdata:
-- 
cgit v1.2.3


From 516af9ba927dd9b4dcc3461f8a8bb6883c61c036 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 16 Mar 2015 13:20:26 -0400
Subject: emit_upstart: fix use of undeclared variable

---
 cloudinit/config/cc_emit_upstart.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_emit_upstart.py b/cloudinit/config/cc_emit_upstart.py
index e1b9a4c2..86ae97ab 100644
--- a/cloudinit/config/cc_emit_upstart.py
+++ b/cloudinit/config/cc_emit_upstart.py
@@ -32,8 +32,7 @@ LOG = logging.getLogger(__name__)
 
 def is_upstart_system():
     if not os.path.isfile("/sbin/initctl"):
-        LOG.debug(("Skipping module named %s,"
-                   " no /sbin/initctl located"), name)
+        LOG.debug("no /sbin/initctl located")
         return False
 
     myenv = os.environ.copy()
-- 
cgit v1.2.3


From 0b9e0444f5092e647a3fa55887d96ffaf3d23c06 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 18 Mar 2015 13:33:12 +0000
Subject: Update is_disk_used for changed enumerate_disk output.

Fixes Launchpad bug #1311463.
---
 cloudinit/config/cc_disk_setup.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index f899210b..e2ce6db4 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -304,8 +304,7 @@ def is_disk_used(device):
 
     # If the child count is higher 1, then there are child nodes
     # such as partition or device mapper nodes
-    use_count = [x for x in enumerate_disk(device)]
-    if len(use_count.splitlines()) > 1:
+    if len(list(enumerate_disk(device))) > 1:
         return True
 
     # If we see a file system, then its used
-- 
cgit v1.2.3


From 7c63a4096d9b6c9dc10605c289ee048c7b0778c6 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 25 Mar 2015 15:54:07 +0000
Subject: Convert DataSourceSmartOS to use v2 metadata.

---
 cloudinit/sources/DataSourceSmartOS.py          |  75 +++++---
 tests/unittests/test_datasource/test_smartos.py | 216 ++++++++++++++++++++----
 2 files changed, 239 insertions(+), 52 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 896fde3f..694a011a 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -29,9 +29,10 @@
 #       http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
 #       Comments with "@datadictionary" are snippets of the definition
 
-import base64
 import binascii
 import os
+import random
+import re
 import serial
 
 from cloudinit import log as logging
@@ -301,6 +302,53 @@ def get_serial(seed_device, seed_timeout):
     return ser
 
 
+class JoyentMetadataFetchException(Exception):
+    pass
+
+
+class JoyentMetadataClient(object):
+
+    def __init__(self, serial):
+        self.serial = serial
+
+    def _checksum(self, body):
+        return '{0:08x}'.format(
+            binascii.crc32(body.encode('utf-8')) & 0xffffffff)
+
+    def _get_value_from_frame(self, expected_request_id, frame):
+        regex = (
+            r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
+            r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
+            r'( (?P<payload>.+))?)')
+        frame_data = re.match(regex, frame).groupdict()
+        if int(frame_data['length']) != len(frame_data['body']):
+            raise JoyentMetadataFetchException(
+                'Incorrect frame length given ({0} != {1}).'.format(
+                    frame_data['length'], len(frame_data['body'])))
+        expected_checksum = self._checksum(frame_data['body'])
+        if frame_data['checksum'] != expected_checksum:
+            raise JoyentMetadataFetchException(
+                'Invalid checksum (expected: {0}; got {1}).'.format(
+                    expected_checksum, frame_data['checksum']))
+        if frame_data['request_id'] != expected_request_id:
+            raise JoyentMetadataFetchException(
+                'Request ID mismatch (expected: {0}; got {1}).'.format(
+                    expected_request_id, frame_data['request_id']))
+        if not frame_data.get('payload', None):
+            return None
+        return util.b64d(frame_data['payload'])
+
+    def get_metadata(self, metadata_key):
+        request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
+        message_body = '{0} GET {1}'.format(request_id,
+                                            util.b64e(metadata_key))
+        msg = 'V2 {0} {1} {2}\n'.format(
+            len(message_body), self._checksum(message_body), message_body)
+        self.serial.write(msg.encode('ascii'))
+        response = self.serial.readline().decode('ascii')
+        return self._get_value_from_frame(request_id, response)
+
+
 def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
                b64=None):
     """Makes a request to via the serial console via "GET <NOUN>"
@@ -314,34 +362,21 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
         encoded, so this method relies on being told if the data is base64 or
         not.
     """
-
     if not noun:
         return False
 
     ser = get_serial(seed_device, seed_timeout)
-    request_line = "GET %s\n" % noun.rstrip()
-    ser.write(request_line.encode('ascii'))
-    status = str(ser.readline()).rstrip()
-    response = []
-    eom_found = False
-
-    if 'SUCCESS' not in status:
-        ser.close()
-        return default
-
-    while not eom_found:
-        m = ser.readline().decode('ascii')
-        if m.rstrip() == ".":
-            eom_found = True
-        else:
-            response.append(m)
 
+    client = JoyentMetadataClient(ser)
+    response = client.get_metadata(noun)
     ser.close()
+    if response is None:
+        return default
 
     if b64 is None:
         b64 = query_data('b64-%s' % noun, seed_device=seed_device,
-                            seed_timeout=seed_timeout, b64=False,
-                            default=False, strip=True)
+                         seed_timeout=seed_timeout, b64=False,
+                         default=False, strip=True)
         b64 = util.is_true(b64)
 
     resp = None
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index cdd83bf8..c79cf3aa 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -31,15 +31,24 @@ import shutil
 import stat
 import tempfile
 import uuid
+from binascii import crc32
+
+import serial
+import six
 
 import six
 
 from cloudinit import helpers as c_helpers
 from cloudinit.sources import DataSourceSmartOS
-from cloudinit.util import b64e
+from cloudinit.util import b64d, b64e
 
 from .. import helpers
 
+try:
+    from unittest import mock
+except ImportError:
+    import mock
+
 MOCK_RETURNS = {
     'hostname': 'test-host',
     'root_authorized_keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname',
@@ -57,6 +66,37 @@ MOCK_RETURNS = {
 DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc')
 
 
+def _checksum(body):
+    return '{0:08x}'.format(crc32(body.encode('utf-8')) & 0xffffffff)
+
+
+def _generate_v2_frame(request_id, command, body=None):
+    body_parts = [request_id, command]
+    if body:
+        body_parts.append(b64e(body))
+    message_body = ' '.join(body_parts)
+    return 'V2 {0} {1} {2}\n'.format(
+        len(message_body), _checksum(message_body), message_body).encode(
+            'ascii')
+
+
+def _parse_v2_frame(line):
+    line = line.decode('ascii')
+    if not line.endswith('\n'):
+        raise Exception('Frames must end with a newline.')
+    version, length, checksum, body = line.strip().split(' ', 3)
+    if version != 'V2':
+        raise Exception('Frames must begin with V2.')
+    if int(length) != len(body):
+        raise Exception('Incorrect frame length given ({0} != {1}).'.format(
+            length, len(body)))
+    expected_checksum = _checksum(body)
+    if checksum != expected_checksum:
+        raise Exception('Invalid checksum.')
+    request_id, command, payload = body.split()
+    return request_id, command, b64d(payload)
+
+
 class MockSerial(object):
     """Fake a serial terminal for testing the code that
         interfaces with the serial"""
@@ -81,39 +121,21 @@ class MockSerial(object):
         return True
 
     def write(self, line):
-        if not isinstance(line, six.binary_type):
-            raise TypeError("Should be writing binary lines.")
-        line = line.decode('ascii').replace('GET ', '')
-        self.last = line.rstrip()
+        self.last = line
 
     def readline(self):
-        if self.new:
-            self.new = False
-            if self.last in self.mockdata:
-                line = 'SUCCESS\n'
-            else:
-                line = 'NOTFOUND %s\n' % self.last
-
-        elif self.last in self.mockdata:
-            if not self.mocked_out:
-                self.mocked_out = [x for x in self._format_out()]
-
-            if len(self.mocked_out) > self.count:
-                self.count += 1
-                line = self.mocked_out[self.count - 1]
-        return line.encode('ascii')
-
-    def _format_out(self):
-        if self.last in self.mockdata:
-            _mret = self.mockdata[self.last]
-            try:
-                for l in _mret.splitlines():
-                    yield "%s\n" % l.rstrip()
-            except:
-                yield "%s\n" % _mret.rstrip()
-
-            yield '.'
-            yield '\n'
+        if self.last == '\n':
+            return 'invalid command\n'
+        elif self.last == 'NEGOTIATE V2\n':
+            return 'V2_OK\n'
+        request_id, command, request_body = _parse_v2_frame(self.last)
+        if command != 'GET':
+            raise Exception('MockSerial only supports GET requests.')
+        metadata_key = request_body.strip()
+        if metadata_key in self.mockdata:
+            return _generate_v2_frame(
+                request_id, 'SUCCESS', self.mockdata[metadata_key])
+        return _generate_v2_frame(request_id, 'NOTFOUND')
 
 
 class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
@@ -459,3 +481,133 @@ def apply_patches(patches):
         setattr(ref, name, replace)
         ret.append((ref, name, orig))
     return ret
+
+
+class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
+
+    def setUp(self):
+        super(TestJoyentMetadataClient, self).setUp()
+        self.serial = mock.MagicMock(spec=serial.Serial)
+        self.request_id = 0xabcdef12
+        self.metadata_value = 'value'
+        self.response_parts = {
+            'command': 'SUCCESS',
+            'crc': 'b5a9ff00',
+            'length': 17 + len(b64e(self.metadata_value)),
+            'payload': b64e(self.metadata_value),
+            'request_id': '{0:08x}'.format(self.request_id),
+        }
+
+        def make_response():
+            payload = ''
+            if self.response_parts['payload']:
+                payload = ' {0}'.format(self.response_parts['payload'])
+            del self.response_parts['payload']
+            return (
+                'V2 {length} {crc} {request_id} {command}{payload}\n'.format(
+                    payload=payload, **self.response_parts).encode('ascii'))
+        self.serial.readline.side_effect = make_response
+        self.patched_funcs.enter_context(
+            mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint',
+                       mock.Mock(return_value=self.request_id)))
+
+    def _get_client(self):
+        return DataSourceSmartOS.JoyentMetadataClient(self.serial)
+
+    def assertEndsWith(self, haystack, prefix):
+        self.assertTrue(haystack.endswith(prefix),
+                        "{0} does not end with '{1}'".format(
+                            repr(haystack), prefix))
+
+    def assertStartsWith(self, haystack, prefix):
+        self.assertTrue(haystack.startswith(prefix),
+                        "{0} does not start with '{1}'".format(
+                            repr(haystack), prefix))
+
+    def test_get_metadata_writes_a_single_line(self):
+        client = self._get_client()
+        client.get_metadata('some_key')
+        self.assertEqual(1, self.serial.write.call_count)
+        written_line = self.serial.write.call_args[0][0]
+        self.assertEndsWith(written_line, b'\n')
+        self.assertEqual(1, written_line.count(b'\n'))
+
+    def _get_written_line(self, key='some_key'):
+        client = self._get_client()
+        client.get_metadata(key)
+        return self.serial.write.call_args[0][0]
+
+    def test_get_metadata_writes_bytes(self):
+        self.assertIsInstance(self._get_written_line(), six.binary_type)
+
+    def test_get_metadata_line_starts_with_v2(self):
+        self.assertStartsWith(self._get_written_line(), b'V2')
+
+    def test_get_metadata_uses_get_command(self):
+        parts = self._get_written_line().decode('ascii').strip().split(' ')
+        self.assertEqual('GET', parts[4])
+
+    def test_get_metadata_base64_encodes_argument(self):
+        key = 'my_key'
+        parts = self._get_written_line(key).decode('ascii').strip().split(' ')
+        self.assertEqual(b64e(key), parts[5])
+
+    def test_get_metadata_calculates_length_correctly(self):
+        parts = self._get_written_line().decode('ascii').strip().split(' ')
+        expected_length = len(' '.join(parts[3:]))
+        self.assertEqual(expected_length, int(parts[1]))
+
+    def test_get_metadata_uses_appropriate_request_id(self):
+        parts = self._get_written_line().decode('ascii').strip().split(' ')
+        request_id = parts[3]
+        self.assertEqual(8, len(request_id))
+        self.assertEqual(request_id, request_id.lower())
+
+    def test_get_metadata_uses_random_number_for_request_id(self):
+        line = self._get_written_line()
+        request_id = line.decode('ascii').strip().split(' ')[3]
+        self.assertEqual('{0:08x}'.format(self.request_id), request_id)
+
+    def test_get_metadata_checksums_correctly(self):
+        parts = self._get_written_line().decode('ascii').strip().split(' ')
+        expected_checksum = '{0:08x}'.format(
+            crc32(' '.join(parts[3:]).encode('utf-8')) & 0xffffffff)
+        checksum = parts[2]
+        self.assertEqual(expected_checksum, checksum)
+
+    def test_get_metadata_reads_a_line(self):
+        client = self._get_client()
+        client.get_metadata('some_key')
+        self.assertEqual(1, self.serial.readline.call_count)
+
+    def test_get_metadata_returns_valid_value(self):
+        client = self._get_client()
+        value = client.get_metadata('some_key')
+        self.assertEqual(self.metadata_value, value)
+
+    def test_get_metadata_throws_exception_for_incorrect_length(self):
+        self.response_parts['length'] = 0
+        client = self._get_client()
+        self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
+                          client.get_metadata, 'some_key')
+
+    def test_get_metadata_throws_exception_for_incorrect_crc(self):
+        self.response_parts['crc'] = 'deadbeef'
+        client = self._get_client()
+        self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
+                          client.get_metadata, 'some_key')
+
+    def test_get_metadata_throws_exception_for_request_id_mismatch(self):
+        self.response_parts['request_id'] = 'deadbeef'
+        client = self._get_client()
+        client._checksum = lambda _: self.response_parts['crc']
+        self.assertRaises(DataSourceSmartOS.JoyentMetadataFetchException,
+                          client.get_metadata, 'some_key')
+
+    def test_get_metadata_returns_None_if_value_not_found(self):
+        self.response_parts['payload'] = ''
+        self.response_parts['command'] = 'NOTFOUND'
+        self.response_parts['length'] = 17
+        client = self._get_client()
+        client._checksum = lambda _: self.response_parts['crc']
+        self.assertIsNone(client.get_metadata('some_key'))
-- 
cgit v1.2.3


From 1828ac3fa151ec7ff761b34305ed5fb85a9020d1 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 25 Mar 2015 15:54:14 +0000
Subject: Add logging to JoyentMetadataClient.

---
 cloudinit/sources/DataSourceSmartOS.py | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 694a011a..61dd044f 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -335,17 +335,23 @@ class JoyentMetadataClient(object):
                 'Request ID mismatch (expected: {0}; got {1}).'.format(
                     expected_request_id, frame_data['request_id']))
         if not frame_data.get('payload', None):
+            LOG.info('No value found.')
             return None
-        return util.b64d(frame_data['payload'])
+        value = util.b64d(frame_data['payload'])
+        LOG.info('Value "%s" found.', value)
+        return value
 
     def get_metadata(self, metadata_key):
+        LOG.info('Fetching metadata key "%s"...', metadata_key)
         request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
         message_body = '{0} GET {1}'.format(request_id,
                                             util.b64e(metadata_key))
         msg = 'V2 {0} {1} {2}\n'.format(
             len(message_body), self._checksum(message_body), message_body)
+        LOG.debug('Writing "%s" to serial port.', msg)
         self.serial.write(msg.encode('ascii'))
         response = self.serial.readline().decode('ascii')
+        LOG.debug('Read "%s" from serial port.', response)
         return self._get_value_from_frame(request_id, response)
 
 
-- 
cgit v1.2.3


From d52feae7ad38670964edebb0eea5db2c8c80f760 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 25 Mar 2015 15:54:19 +0000
Subject: Ensure that the serial console is always closed.

---
 cloudinit/sources/DataSourceSmartOS.py          |  9 +++++----
 tests/unittests/test_datasource/test_smartos.py | 12 ++++++++++++
 2 files changed, 17 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 61dd044f..237fc140 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -30,9 +30,11 @@
 #       Comments with "@datadictionary" are snippets of the definition
 
 import binascii
+import contextlib
 import os
 import random
 import re
+
 import serial
 
 from cloudinit import log as logging
@@ -371,11 +373,10 @@ def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
     if not noun:
         return False
 
-    ser = get_serial(seed_device, seed_timeout)
+    with contextlib.closing(get_serial(seed_device, seed_timeout)) as ser:
+        client = JoyentMetadataClient(ser)
+        response = client.get_metadata(noun)
 
-    client = JoyentMetadataClient(ser)
-    response = client.get_metadata(noun)
-    ser.close()
     if response is None:
         return default
 
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 39991cc2..28b41eaf 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -409,6 +409,18 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         self.assertEqual(dsrc.device_name_to_device('FOO'),
                          mydscfg['disk_aliases']['FOO'])
 
+    @mock.patch('cloudinit.sources.DataSourceSmartOS.JoyentMetadataClient')
+    @mock.patch('cloudinit.sources.DataSourceSmartOS.get_serial')
+    def test_serial_console_closed_on_error(self, get_serial, metadata_client):
+        class OurException(Exception):
+            pass
+        metadata_client.side_effect = OurException
+        try:
+            DataSourceSmartOS.query_data('noun', 'device', 0)
+        except OurException:
+            pass
+        self.assertEqual(1, get_serial.return_value.close.call_count)
+
 
 def apply_patches(patches):
     ret = []
-- 
cgit v1.2.3


From f4eb74ccc512d12afbb17dd9c678a5308ca64e9f Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 25 Mar 2015 17:26:33 +0000
Subject: Switch logging from info to debug level.

---
 cloudinit/sources/DataSourceSmartOS.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 237fc140..d299cf26 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -337,14 +337,14 @@ class JoyentMetadataClient(object):
                 'Request ID mismatch (expected: {0}; got {1}).'.format(
                     expected_request_id, frame_data['request_id']))
         if not frame_data.get('payload', None):
-            LOG.info('No value found.')
+            LOG.debug('No value found.')
             return None
         value = util.b64d(frame_data['payload'])
-        LOG.info('Value "%s" found.', value)
+        LOG.debug('Value "%s" found.', value)
         return value
 
     def get_metadata(self, metadata_key):
-        LOG.info('Fetching metadata key "%s"...', metadata_key)
+        LOG.debug('Fetching metadata key "%s"...', metadata_key)
         request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
         message_body = '{0} GET {1}'.format(request_id,
                                             util.b64e(metadata_key))
-- 
cgit v1.2.3


From 5ae131cad02f383c9f3109ad0f51d918787b0196 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 25 Mar 2015 17:27:22 +0000
Subject: Add link to Joyent metadata specification.

---
 cloudinit/sources/DataSourceSmartOS.py | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index d299cf26..ec2d10ae 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -309,6 +309,12 @@ class JoyentMetadataFetchException(Exception):
 
 
 class JoyentMetadataClient(object):
+    """
+    A client implementing v2 of the Joyent Metadata Protocol Specification.
+
+    The full specification can be found at
+    http://eng.joyent.com/mdata/protocol.html
+    """
 
     def __init__(self, serial):
         self.serial = serial
-- 
cgit v1.2.3


From 5524fd6336a9162aef7687e84705114aa3eb47cd Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 25 Mar 2015 17:59:42 +0000
Subject: Compile SmartOS line-parsing regex once.

---
 cloudinit/sources/DataSourceSmartOS.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index ec2d10ae..c9b497df 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -315,6 +315,10 @@ class JoyentMetadataClient(object):
     The full specification can be found at
     http://eng.joyent.com/mdata/protocol.html
     """
+    line_regex = re.compile(
+        r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
+        r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
+        r'( (?P<payload>.+))?)')
 
     def __init__(self, serial):
         self.serial = serial
@@ -324,11 +328,7 @@ class JoyentMetadataClient(object):
             binascii.crc32(body.encode('utf-8')) & 0xffffffff)
 
     def _get_value_from_frame(self, expected_request_id, frame):
-        regex = (
-            r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
-            r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
-            r'( (?P<payload>.+))?)')
-        frame_data = re.match(regex, frame).groupdict()
+        frame_data = self.line_regex.match(frame).groupdict()
         if int(frame_data['length']) != len(frame_data['body']):
             raise JoyentMetadataFetchException(
                 'Incorrect frame length given ({0} != {1}).'.format(
-- 
cgit v1.2.3


From a373e1097f6be460914e6cbbc897c6aa8e4aaefe Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 26 Mar 2015 20:39:25 -0400
Subject: commit work in progress. tests pass.

---
 cloudinit/config/cc_snappy.py                      | 159 +++++++++++++++-----
 .../unittests/test_handler/test_handler_snappy.py  | 163 +++++++++++++++++++++
 2 files changed, 285 insertions(+), 37 deletions(-)
 create mode 100644 tests/unittests/test_handler/test_handler_snappy.py

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 133336d4..bef8c170 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -7,18 +7,21 @@ from cloudinit import util
 from cloudinit.settings import PER_INSTANCE
 
 import glob
+import six
+import tempfile
 import os
 
 LOG = logging.getLogger(__name__)
 
 frequency = PER_INSTANCE
-SNAPPY_ENV_PATH = "/writable/system-data/etc/snappy.env"
+SNAPPY_CMD = "snappy"
 
 BUILTIN_CFG = {
     'packages': [],
     'packages_dir': '/writable/user-data/cloud-init/click_packages',
     'ssh_enabled': False,
-    'system_snappy': "auto"
+    'system_snappy': "auto",
+    'configs': {},
 }
 
 """
@@ -27,43 +30,111 @@ snappy:
   ssh_enabled: True
   packages:
     - etcd
-    - {'name': 'pkg1', 'config': "wark"}
+    - pkg2
+  configs:
+    pkgname: config-blob
+    pkgname2: config-blob
 """
 
 
-def install_package(pkg_name, config=None):
-    cmd = ["snappy", "install"]
-    if config:
-        if os.path.isfile(config):
-            cmd.append("--config-file=" + config)
+def get_fs_package_ops(fspath):
+    if not fspath:
+        return []
+    ops = []
+    for snapfile in glob.glob(os.path.sep.join([fspath, '*.snap'])):
+        cfg = snapfile.rpartition(".")[0] + ".config"
+        name = os.path.basename(snapfile).rpartition(".")[0]
+        if not os.path.isfile(cfg):
+            cfg = None
+        ops.append(makeop('install', name, config=None,
+                    path=snapfile, cfgfile=cfg))
+    return ops
+
+
+def makeop(op, name, config=None, path=None, cfgfile=None):
+    return({'op': op, 'name': name, 'config': config, 'path': path,
+            'cfgfile': cfgfile})
+
+
+def get_package_ops(packages, configs, installed=None, fspath=None):
+    # get the install an config operations that should be done
+    if installed is None:
+        installed = read_installed_packages()
+
+    if not packages:
+        packages = []
+    if not configs:
+        configs = {}
+
+    ops = []
+    ops += get_fs_package_ops(fspath)
+
+    for name in packages:
+        ops.append(makeop('install', name, configs.get('name')))
+
+    to_install = [f['name'] for f in ops]
+
+    for name in configs:
+        if name in installed and name not in to_install:
+            ops.append(makeop('config', name, config=configs[name]))
+
+    # prefer config entries to filepath entries
+    for op in ops:
+        name = op['name']
+        if name in configs and op['op'] == 'install' and 'cfgfile' in op:
+            LOG.debug("preferring configs[%s] over '%s'", name, op['cfgfile'])
+            op['cfgfile'] = None
+            op['config'] = configs[op['name']]
+
+    return ops
+
+
+def render_snap_op(op, name, path=None, cfgfile=None, config=None):
+    if op not in ('install', 'config'):
+        raise ValueError("cannot render op '%s'" % op)
+
+    try:
+        cfg_tmpf = None
+        if config is not None:
+            if isinstance(config, six.binary_type):
+                cfg_bytes = config
+            elif isinstance(config, six.text_type):
+                cfg_bytes = config_data.encode()
+            else:
+                cfg_bytes = yaml.safe_dump(config).encode()
+
+            (fd, cfg_tmpf) = tempfile.mkstemp()
+            os.write(fd, config_data)
+            os.close(fd)
+            cfgfile = cfg_tmpf
+
+        cmd = [SNAPPY_CMD, op]
+        if op == 'install' and cfgfile:
+            cmd.append('--config=' + cfgfile)
+        elif op == 'config':
+            cmd.append(cfgfile)
+
+        util.subp(cmd)
+
+    finally:
+        if tmpfile:
+            os.unlink(tmpfile)
+
+
+def read_installed_packages():
+    return [p[0] for p in read_pkg_data()]
+
+
+def read_pkg_data():
+    out, err = util.subp([SNAPPY_CMD, "list"])
+    for line in out.splitlines()[1:]:
+        toks = line.split(sep=None, maxsplit=3)
+        if len(toks) == 3:
+            (name, date, version) = toks
+            dev = None
         else:
-            cmd.append("--config=" + config)
-    cmd.append(pkg_name)
-    util.subp(cmd)
-
-
-def install_packages(package_dir, packages):
-    local_pkgs = glob.glob(os.path.sep.join([package_dir, '*.click']))
-    LOG.debug("installing local packages %s" % local_pkgs)
-    if local_pkgs:
-        for pkg in local_pkgs:
-            cfg = pkg.replace(".click", ".config")
-            if not os.path.isfile(cfg):
-                cfg = None
-            install_package(pkg, config=cfg)
-
-    LOG.debug("installing click packages")
-    if packages:
-        for pkg in packages:
-            if not pkg:
-                continue
-            if isinstance(pkg, str):
-                name = pkg
-                config = None
-            elif pkg:
-                name = pkg.get('name', pkg)
-                config = pkg.get('config')
-            install_package(pkg_name=name, config=config)
+            (name, date, version, dev) = toks
+        pkgs.append((name, date, version, dev,))
 
 
 def disable_enable_ssh(enabled):
@@ -107,7 +178,21 @@ def handle(name, cfg, cloud, log, args):
         LOG.debug("%s: 'auto' mode, and system not snappy", name)
         return
 
-    install_packages(mycfg['packages_dir'],
-                     mycfg['packages'])
+    pkg_ops = get_package_ops(packages=mycfg['packages'],
+                              configs=mycfg['configs'],
+                              fspath=mycfg['packages_dir'])
+
+    fails = []
+    for pkg_op in pkg_ops:
+        try:
+            render_snap_op(op=pkg_op['op'], name=pkg_op['name'],
+                           cfgfile=pkg_op['cfgfile'], config=pkg_op['config'])
+        except Exception as e:
+            fails.append((pkg_op, e,))
+            LOG.warn("'%s' failed for '%s': %s",
+                     pkg_op['op'], pkg_op['name'], e)
 
     disable_enable_ssh(mycfg.get('ssh_enabled', False))
+
+    if fails:
+        raise Exception("failed to install/configure snaps")
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
new file mode 100644
index 00000000..6b6d3584
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -0,0 +1,163 @@
+from cloudinit.config.cc_snappy import (makeop, get_package_ops)
+from cloudinit import util
+from .. import helpers as t_help
+
+import os
+import tempfile
+
+class TestInstallPackages(t_help.TestCase):
+    def setUp(self):
+        super(TestInstallPackages, self).setUp()
+        self.unapply = []
+
+        # by default 'which' has nothing in its path
+        self.apply_patches([(util, 'subp', self._subp)])
+        self.subp_called = []
+        self.snapcmds = []
+        self.tmp = tempfile.mkdtemp()
+
+    def tearDown(self):
+        apply_patches([i for i in reversed(self.unapply)])
+
+    def apply_patches(self, patches):
+        ret = apply_patches(patches)
+        self.unapply += ret
+
+    def _subp(self, *args, **kwargs):
+        # supports subp calling with cmd as args or kwargs
+        if 'args' not in kwargs:
+            kwargs['args'] = args[0]
+        self.subp_called.append(kwargs)
+        snap_cmds = []
+        args = kwargs['args']
+        if args[0:2] == ['snappy', 'config']:
+            if args[3] == "-":
+                config = kwargs.get('data', '')
+            else:
+                with open(args[3], "rb") as fp:
+                    config = fp.read()
+            snap_cmds.append(('config', args[2], config,))
+        elif args[0:2] == ['snappy', 'install']:
+            # basically parse the snappy command and add
+            # to snap_installs a tuple (pkg, config)
+            config = None
+            pkg = None
+            for arg in args[2:]:
+                if arg.startswith("--config="):
+                    cfgfile = arg.partition("=")[2]
+                    if cfgfile == "-":
+                        config = kwargs.get('data', '')
+                    elif cfgfile:
+                        with open(cfgfile, "rb") as fp:
+                            config = fp.read()
+                elif not pkg and not arg.startswith("-"):
+                    pkg = os.path.basename(arg)
+            self.snap_installs.append(('install', pkg, config,))
+
+    def test_package_ops_1(self):
+        ret = get_package_ops(
+            packages=['pkg1', 'pkg2', 'pkg3'],
+            configs={'pkg2': b'mycfg2'}, installed=[])
+        self.assertEqual(ret,
+            [makeop('install', 'pkg1', None, None),
+             makeop('install', 'pkg2', b'mycfg2', None),
+             makeop('install', 'pkg3', None, None)])
+
+    def test_package_ops_config_only(self):
+        ret = get_package_ops(
+            packages=None,
+            configs={'pkg2': b'mycfg2'}, installed=['pkg1', 'pkg2'])
+        self.assertEqual(ret,
+            [makeop('config', 'pkg2', b'mycfg2')])
+
+    def test_package_ops_install_and_config(self):
+        ret = get_package_ops(
+            packages=['pkg3', 'pkg2'],
+            configs={'pkg2': b'mycfg2', 'xinstalled': b'xcfg'},
+            installed=['xinstalled'])
+        self.assertEqual(ret,
+            [makeop('install', 'pkg3'),
+             makeop('install', 'pkg2', b'mycfg2'),
+             makeop('config', 'xinstalled', b'xcfg')])
+
+    def test_package_ops_with_file(self):
+        t_help.populate_dir(self.tmp,
+            {"snapf1.snap": b"foo1", "snapf1.config": b"snapf1cfg",
+             "snapf2.snap": b"foo2", "foo.bar": "ignored"})
+        ret = get_package_ops(
+            packages=['pkg1'], configs={}, installed=[], fspath=self.tmp)
+        self.assertEqual(ret,
+            [makeop_tmpd(self.tmp, 'install', 'snapf1', path="snapf1.snap",
+                         cfgfile="snapf1.config"),
+             makeop_tmpd(self.tmp, 'install', 'snapf2', path="snapf2.snap"),
+             makeop('install', 'pkg1')])
+
+        
+def makeop_tmpd(tmpd, op, name, config=None, path=None, cfgfile=None):
+    if cfgfile:
+        cfgfile = os.path.sep.join([tmpd, cfgfile])
+    if path:
+        path = os.path.sep.join([tmpd, path])
+    return(makeop(op=op, name=name, config=config, path=path, cfgfile=cfgfile))
+
+#    def test_local_snaps_no_config(self):
+#        t_help.populate_dir(self.tmp,
+#            {"snap1.snap": b"foo", "snap2.snap": b"foo", "foosnap.txt": b"foo"})
+#        cc_snappy.install_packages(self.tmp, None)
+#        self.assertEqual(self.snap_installs,
+#            [("snap1.snap", None), ("snap2.snap", None)])
+#
+#    def test_local_snaps_mixed_config(self):
+#        t_help.populate_dir(self.tmp,
+#            {"snap1.snap": b"foo", "snap2.snap": b"snap2",
+#             "snap1.config": b"snap1config"})
+#        cc_snappy.install_packages(self.tmp, None)
+#        self.assertEqual(self.snap_installs,
+#            [("snap1.snap", b"snap1config"), ("snap2.snap", None)])
+#
+#    def test_local_snaps_all_config(self):
+#        t_help.populate_dir(self.tmp,
+#            {"snap1.snap": "foo", "snap1.config": b"snap1config",
+#             "snap2.snap": "snap2", "snap2.config": b"snap2config"})
+#        cc_snappy.install_packages(self.tmp, None)
+#        self.assertEqual(self.snap_installs,
+#            [("snap1.snap", b"snap1config"), ("snap2.snap", b"snap2config")])
+#
+#    def test_local_snaps_and_packages(self):
+#        t_help.populate_dir(self.tmp,
+#            {"snap1.snap": "foo", "snap1.config": b"snap1config"})
+#        cc_snappy.install_packages(self.tmp, ["snap-in-store"])
+#        self.assertEqual(self.snap_installs,
+#            [("snap1.snap", b"snap1config"), ("snap-in-store", None)])
+#
+#    def test_packages_no_config(self):
+#        cc_snappy.install_packages(self.tmp, ["snap-in-store"])
+#        self.assertEqual(self.snap_installs,
+#            [("snap-in-store", None)])
+#
+#    def test_packages_mixed_config(self):
+#        cc_snappy.install_packages(self.tmp,
+#            ["snap-in-store",
+#             {'name': 'snap2-in-store', 'config': b"foo"}])
+#        self.assertEqual(self.snap_installs,
+#            [("snap-in-store", None), ("snap2-in-store", b"foo")])
+#
+#    def test_packages_all_config(self):
+#        cc_snappy.install_packages(self.tmp,
+#            [{'name': 'snap1-in-store', 'config': b"boo"},
+#             {'name': 'snap2-in-store', 'config': b"wark"}])
+#        self.assertEqual(self.snap_installs,
+#            [("snap1-in-store", b"boo"), ("snap2-in-store", b"wark")])
+#
+#
+
+def apply_patches(patches):
+    ret = []
+    for (ref, name, replace) in patches:
+        if replace is None:
+            continue
+        orig = getattr(ref, name)
+        setattr(ref, name, replace)
+        ret.append((ref, name, orig))
+    return ret
+
-- 
cgit v1.2.3


From bd7165dd67338f742f999fb2c53ec5f67fc66477 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 26 Mar 2015 21:14:17 -0400
Subject: start of snap_op tests

---
 cloudinit/config/cc_snappy.py                      | 10 ++++--
 .../unittests/test_handler/test_handler_snappy.py  | 40 +++++++++++++++++++---
 2 files changed, 44 insertions(+), 6 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index bef8c170..cf441c92 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -114,11 +114,17 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
         elif op == 'config':
             cmd.append(cfgfile)
 
+        if op == 'install':
+            if path:
+                cmd.append(path)
+            else:
+                cmd.append(name)
+
         util.subp(cmd)
 
     finally:
-        if tmpfile:
-            os.unlink(tmpfile)
+        if cfg_tmpf:
+            os.unlink(cfg_tmpf)
 
 
 def read_installed_packages():
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
index 6b6d3584..7dc77970 100644
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -1,4 +1,5 @@
-from cloudinit.config.cc_snappy import (makeop, get_package_ops)
+from cloudinit.config.cc_snappy import (
+    makeop, get_package_ops, render_snap_op)
 from cloudinit import util
 from .. import helpers as t_help
 
@@ -36,7 +37,7 @@ class TestInstallPackages(t_help.TestCase):
             else:
                 with open(args[3], "rb") as fp:
                     config = fp.read()
-            snap_cmds.append(('config', args[2], config,))
+            self.snapcmds.append(['config', args[2], config])
         elif args[0:2] == ['snappy', 'install']:
             # basically parse the snappy command and add
             # to snap_installs a tuple (pkg, config)
@@ -51,8 +52,8 @@ class TestInstallPackages(t_help.TestCase):
                         with open(cfgfile, "rb") as fp:
                             config = fp.read()
                 elif not pkg and not arg.startswith("-"):
-                    pkg = os.path.basename(arg)
-            self.snap_installs.append(('install', pkg, config,))
+                    pkg = arg
+            self.snapcmds.append(['install', pkg, config])
 
     def test_package_ops_1(self):
         ret = get_package_ops(
@@ -92,6 +93,37 @@ class TestInstallPackages(t_help.TestCase):
              makeop_tmpd(self.tmp, 'install', 'snapf2', path="snapf2.snap"),
              makeop('install', 'pkg1')])
 
+    #def render_snap_op(op, name, path=None, cfgfile=None, config=None):
+    def test_render_op_localsnap(self):
+        t_help.populate_dir(self.tmp, {"snapf1.snap": b"foo1"})
+        op = makeop_tmpd(self.tmp, 'install', 'snapf1',
+                         path='snapf1.snap')
+        render_snap_op(**op)
+        self.assertEqual(self.snapcmds,
+            [['install', op['path'], None]])
+
+    def test_render_op_localsnap_localconfig(self):
+        t_help.populate_dir(self.tmp,
+            {"snapf1.snap": b"foo1", 'snapf1.config': b'snapf1cfg'})
+        op = makeop_tmpd(self.tmp, 'install', 'snapf1',
+                         path='snapf1.snap', cfgfile='snapf1.config')
+        render_snap_op(**op)
+        self.assertEqual(self.snapcmds,
+            [['install', op['path'], b'snapf1cfg']])
+
+    def test_render_op_localsnap_config(self):
+        pass
+
+    def test_render_op_snap(self):
+        pass
+
+    def test_render_op_snap_config(self):
+        pass
+
+    def test_render_op_config(self):
+        pass
+
+
         
 def makeop_tmpd(tmpd, op, name, config=None, path=None, cfgfile=None):
     if cfgfile:
-- 
cgit v1.2.3


From df43c6bd3726c9a34b9f8ff4bbf75957aa751011 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 26 Mar 2015 21:55:26 -0400
Subject: pep8, and some more tests

---
 cloudinit/config/cc_snappy.py                      |  13 +-
 .../unittests/test_handler/test_handler_snappy.py  | 131 ++++++++-------------
 2 files changed, 57 insertions(+), 87 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index cf441c92..c926ae0a 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -99,26 +99,25 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
             if isinstance(config, six.binary_type):
                 cfg_bytes = config
             elif isinstance(config, six.text_type):
-                cfg_bytes = config_data.encode()
+                cfg_bytes = config.encode()
             else:
                 cfg_bytes = yaml.safe_dump(config).encode()
 
             (fd, cfg_tmpf) = tempfile.mkstemp()
-            os.write(fd, config_data)
+            os.write(fd, cfg_bytes)
             os.close(fd)
             cfgfile = cfg_tmpf
 
         cmd = [SNAPPY_CMD, op]
-        if op == 'install' and cfgfile:
-            cmd.append('--config=' + cfgfile)
-        elif op == 'config':
-            cmd.append(cfgfile)
-
         if op == 'install':
+            if cfgfile:
+                cmd.append('--config=' + cfgfile)
             if path:
                 cmd.append(path)
             else:
                 cmd.append(name)
+        elif op == 'config':
+            cmd += [name, cfgfile]
 
         util.subp(cmd)
 
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
index 7dc77970..8759a07d 100644
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -4,8 +4,10 @@ from cloudinit import util
 from .. import helpers as t_help
 
 import os
+import shutil
 import tempfile
 
+
 class TestInstallPackages(t_help.TestCase):
     def setUp(self):
         super(TestInstallPackages, self).setUp()
@@ -15,15 +17,19 @@ class TestInstallPackages(t_help.TestCase):
         self.apply_patches([(util, 'subp', self._subp)])
         self.subp_called = []
         self.snapcmds = []
-        self.tmp = tempfile.mkdtemp()
+        self.tmp = tempfile.mkdtemp(prefix="TestInstallPackages")
 
     def tearDown(self):
         apply_patches([i for i in reversed(self.unapply)])
+        shutil.rmtree(self.tmp)
 
     def apply_patches(self, patches):
         ret = apply_patches(patches)
         self.unapply += ret
 
+    def populate_tmp(self, files):
+        return t_help.populate_dir(self.tmp, files)
+
     def _subp(self, *args, **kwargs):
         # supports subp calling with cmd as args or kwargs
         if 'args' not in kwargs:
@@ -31,6 +37,8 @@ class TestInstallPackages(t_help.TestCase):
         self.subp_called.append(kwargs)
         snap_cmds = []
         args = kwargs['args']
+        # here we basically parse the snappy command invoked
+        # and append to snapcmds a list of (mode, pkg, config)
         if args[0:2] == ['snappy', 'config']:
             if args[3] == "-":
                 config = kwargs.get('data', '')
@@ -39,8 +47,6 @@ class TestInstallPackages(t_help.TestCase):
                     config = fp.read()
             self.snapcmds.append(['config', args[2], config])
         elif args[0:2] == ['snappy', 'install']:
-            # basically parse the snappy command and add
-            # to snap_installs a tuple (pkg, config)
             config = None
             pkg = None
             for arg in args[2:]:
@@ -59,72 +65,88 @@ class TestInstallPackages(t_help.TestCase):
         ret = get_package_ops(
             packages=['pkg1', 'pkg2', 'pkg3'],
             configs={'pkg2': b'mycfg2'}, installed=[])
-        self.assertEqual(ret,
-            [makeop('install', 'pkg1', None, None),
-             makeop('install', 'pkg2', b'mycfg2', None),
-             makeop('install', 'pkg3', None, None)])
+        self.assertEqual(
+            ret, [makeop('install', 'pkg1', None, None),
+                  makeop('install', 'pkg2', b'mycfg2', None),
+                  makeop('install', 'pkg3', None, None)])
 
     def test_package_ops_config_only(self):
         ret = get_package_ops(
             packages=None,
             configs={'pkg2': b'mycfg2'}, installed=['pkg1', 'pkg2'])
-        self.assertEqual(ret,
-            [makeop('config', 'pkg2', b'mycfg2')])
+        self.assertEqual(
+            ret, [makeop('config', 'pkg2', b'mycfg2')])
 
     def test_package_ops_install_and_config(self):
         ret = get_package_ops(
             packages=['pkg3', 'pkg2'],
             configs={'pkg2': b'mycfg2', 'xinstalled': b'xcfg'},
             installed=['xinstalled'])
-        self.assertEqual(ret,
-            [makeop('install', 'pkg3'),
-             makeop('install', 'pkg2', b'mycfg2'),
-             makeop('config', 'xinstalled', b'xcfg')])
+        self.assertEqual(
+            ret, [makeop('install', 'pkg3'),
+                  makeop('install', 'pkg2', b'mycfg2'),
+                  makeop('config', 'xinstalled', b'xcfg')])
 
     def test_package_ops_with_file(self):
-        t_help.populate_dir(self.tmp,
+        self.populate_tmp(
             {"snapf1.snap": b"foo1", "snapf1.config": b"snapf1cfg",
              "snapf2.snap": b"foo2", "foo.bar": "ignored"})
         ret = get_package_ops(
             packages=['pkg1'], configs={}, installed=[], fspath=self.tmp)
-        self.assertEqual(ret,
+        self.assertEqual(
+            ret,
             [makeop_tmpd(self.tmp, 'install', 'snapf1', path="snapf1.snap",
                          cfgfile="snapf1.config"),
              makeop_tmpd(self.tmp, 'install', 'snapf2', path="snapf2.snap"),
              makeop('install', 'pkg1')])
 
-    #def render_snap_op(op, name, path=None, cfgfile=None, config=None):
+    def test_package_ops_config_overrides_file(self):
+        # config data overrides local file .config
+        self.populate_tmp(
+            {"snapf1.snap": b"foo1", "snapf1.config": b"snapf1cfg"})
+        ret = get_package_ops(
+            packages=[], configs={'snapf1': 'snapf1cfg-config'},
+            installed=[], fspath=self.tmp)
+        self.assertEqual(
+            ret, [makeop_tmpd(self.tmp, 'install', 'snapf1',
+                              path="snapf1.snap", config="snapf1cfg-config")])
+
     def test_render_op_localsnap(self):
-        t_help.populate_dir(self.tmp, {"snapf1.snap": b"foo1"})
+        self.populate_tmp({"snapf1.snap": b"foo1"})
         op = makeop_tmpd(self.tmp, 'install', 'snapf1',
                          path='snapf1.snap')
         render_snap_op(**op)
-        self.assertEqual(self.snapcmds,
-            [['install', op['path'], None]])
+        self.assertEqual(
+            self.snapcmds, [['install', op['path'], None]])
 
     def test_render_op_localsnap_localconfig(self):
-        t_help.populate_dir(self.tmp,
+        self.populate_tmp(
             {"snapf1.snap": b"foo1", 'snapf1.config': b'snapf1cfg'})
         op = makeop_tmpd(self.tmp, 'install', 'snapf1',
                          path='snapf1.snap', cfgfile='snapf1.config')
         render_snap_op(**op)
-        self.assertEqual(self.snapcmds,
-            [['install', op['path'], b'snapf1cfg']])
-
-    def test_render_op_localsnap_config(self):
-        pass
+        self.assertEqual(
+            self.snapcmds, [['install', op['path'], b'snapf1cfg']])
 
     def test_render_op_snap(self):
-        pass
+        op = makeop('install', 'snapf1')
+        render_snap_op(**op)
+        self.assertEqual(
+            self.snapcmds, [['install', 'snapf1', None]])
 
     def test_render_op_snap_config(self):
-        pass
+        op = makeop('install', 'snapf1', config=b'myconfig')
+        render_snap_op(**op)
+        self.assertEqual(
+            self.snapcmds, [['install', 'snapf1', b'myconfig']])
 
     def test_render_op_config(self):
-        pass
+        op = makeop('config', 'snapf1', config=b'myconfig')
+        render_snap_op(**op)
+        self.assertEqual(
+            self.snapcmds, [['config', 'snapf1', b'myconfig']])
 
 
-        
 def makeop_tmpd(tmpd, op, name, config=None, path=None, cfgfile=None):
     if cfgfile:
         cfgfile = os.path.sep.join([tmpd, cfgfile])
@@ -132,56 +154,6 @@ def makeop_tmpd(tmpd, op, name, config=None, path=None, cfgfile=None):
         path = os.path.sep.join([tmpd, path])
     return(makeop(op=op, name=name, config=config, path=path, cfgfile=cfgfile))
 
-#    def test_local_snaps_no_config(self):
-#        t_help.populate_dir(self.tmp,
-#            {"snap1.snap": b"foo", "snap2.snap": b"foo", "foosnap.txt": b"foo"})
-#        cc_snappy.install_packages(self.tmp, None)
-#        self.assertEqual(self.snap_installs,
-#            [("snap1.snap", None), ("snap2.snap", None)])
-#
-#    def test_local_snaps_mixed_config(self):
-#        t_help.populate_dir(self.tmp,
-#            {"snap1.snap": b"foo", "snap2.snap": b"snap2",
-#             "snap1.config": b"snap1config"})
-#        cc_snappy.install_packages(self.tmp, None)
-#        self.assertEqual(self.snap_installs,
-#            [("snap1.snap", b"snap1config"), ("snap2.snap", None)])
-#
-#    def test_local_snaps_all_config(self):
-#        t_help.populate_dir(self.tmp,
-#            {"snap1.snap": "foo", "snap1.config": b"snap1config",
-#             "snap2.snap": "snap2", "snap2.config": b"snap2config"})
-#        cc_snappy.install_packages(self.tmp, None)
-#        self.assertEqual(self.snap_installs,
-#            [("snap1.snap", b"snap1config"), ("snap2.snap", b"snap2config")])
-#
-#    def test_local_snaps_and_packages(self):
-#        t_help.populate_dir(self.tmp,
-#            {"snap1.snap": "foo", "snap1.config": b"snap1config"})
-#        cc_snappy.install_packages(self.tmp, ["snap-in-store"])
-#        self.assertEqual(self.snap_installs,
-#            [("snap1.snap", b"snap1config"), ("snap-in-store", None)])
-#
-#    def test_packages_no_config(self):
-#        cc_snappy.install_packages(self.tmp, ["snap-in-store"])
-#        self.assertEqual(self.snap_installs,
-#            [("snap-in-store", None)])
-#
-#    def test_packages_mixed_config(self):
-#        cc_snappy.install_packages(self.tmp,
-#            ["snap-in-store",
-#             {'name': 'snap2-in-store', 'config': b"foo"}])
-#        self.assertEqual(self.snap_installs,
-#            [("snap-in-store", None), ("snap2-in-store", b"foo")])
-#
-#    def test_packages_all_config(self):
-#        cc_snappy.install_packages(self.tmp,
-#            [{'name': 'snap1-in-store', 'config': b"boo"},
-#             {'name': 'snap2-in-store', 'config': b"wark"}])
-#        self.assertEqual(self.snap_installs,
-#            [("snap1-in-store", b"boo"), ("snap2-in-store", b"wark")])
-#
-#
 
 def apply_patches(patches):
     ret = []
@@ -192,4 +164,3 @@ def apply_patches(patches):
         setattr(ref, name, replace)
         ret.append((ref, name, orig))
     return ret
-
-- 
cgit v1.2.3


From 973c8b05358fe6ad1ce7adb25cb743ef4d38d792 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 26 Mar 2015 21:55:45 -0400
Subject: pep8

---
 cloudinit/config/cc_snappy.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index c926ae0a..d1447fe5 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -47,7 +47,7 @@ def get_fs_package_ops(fspath):
         if not os.path.isfile(cfg):
             cfg = None
         ops.append(makeop('install', name, config=None,
-                    path=snapfile, cfgfile=cfg))
+                   path=snapfile, cfgfile=cfg))
     return ops
 
 
-- 
cgit v1.2.3


From 4c341a87d4b0804565e74e6335a0293dab6c0c7b Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 26 Mar 2015 22:10:01 -0400
Subject: add tests for data types

---
 cloudinit/config/cc_snappy.py                      |  2 +-
 .../unittests/test_handler/test_handler_snappy.py  | 35 +++++++++++++++++++++-
 2 files changed, 35 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index d1447fe5..bd928e54 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -101,7 +101,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
             elif isinstance(config, six.text_type):
                 cfg_bytes = config.encode()
             else:
-                cfg_bytes = yaml.safe_dump(config).encode()
+                cfg_bytes = util.yaml_dumps(config)
 
             (fd, cfg_tmpf) = tempfile.mkstemp()
             os.write(fd, cfg_bytes)
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
index 8759a07d..81d891d2 100644
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -140,12 +140,45 @@ class TestInstallPackages(t_help.TestCase):
         self.assertEqual(
             self.snapcmds, [['install', 'snapf1', b'myconfig']])
 
-    def test_render_op_config(self):
+    def test_render_op_config_bytes(self):
         op = makeop('config', 'snapf1', config=b'myconfig')
         render_snap_op(**op)
         self.assertEqual(
             self.snapcmds, [['config', 'snapf1', b'myconfig']])
 
+    def test_render_op_config_string(self):
+        mycfg = 'myconfig: foo\nhisconfig: bar\n'
+        op = makeop('config', 'snapf1', config=mycfg)
+        render_snap_op(**op)
+        self.assertEqual(
+            self.snapcmds, [['config', 'snapf1', mycfg.encode()]])
+
+    def test_render_op_config_dict(self):
+        # config entry for package can be a dict, not a string blob
+        mycfg = {'foo': 'bar'}
+        op = makeop('config', 'snapf1', config=mycfg)
+        render_snap_op(**op)
+        # snapcmds is a list of 3-entry lists. data_found will be the
+        # blob of data in the file in 'snappy install --config=<file>'
+        data_found = self.snapcmds[0][2]
+        self.assertEqual(mycfg, util.load_yaml(data_found))
+
+    def test_render_op_config_list(self):
+        # config entry for package can be a list, not a string blob
+        mycfg = ['foo', 'bar', 'wark', {'f1': 'b1'}]
+        op = makeop('config', 'snapf1', config=mycfg)
+        render_snap_op(**op)
+        data_found = self.snapcmds[0][2]
+        self.assertEqual(mycfg, util.load_yaml(data_found, allowed=(list,)))
+
+    def test_render_op_config_int(self):
+        # config entry for package can be a list, not a string blob
+        mycfg = 1
+        op = makeop('config', 'snapf1', config=mycfg)
+        render_snap_op(**op)
+        data_found = self.snapcmds[0][2]
+        self.assertEqual(mycfg, util.load_yaml(data_found, allowed=(int,)))
+
 
 def makeop_tmpd(tmpd, op, name, config=None, path=None, cfgfile=None):
     if cfgfile:
-- 
cgit v1.2.3


From 5e012b1e5f51f82e503a760c8c9c0e2c66aedfee Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 26 Mar 2015 22:16:13 -0400
Subject: prefer snappy-go to snappy

---
 cloudinit/config/cc_snappy.py | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index bd928e54..dbdc402c 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -168,6 +168,14 @@ def system_is_snappy():
     return False
 
 
+def set_snappy_command():
+    if util.which("snappy-go"):
+        SNAPPY_COMMAND = "snappy-go"
+    else:
+        SNAPPY_COMMAND = "snappy"
+    LOG.debug("snappy command is '%s'", SNAPPY_COMMAND)
+
+
 def handle(name, cfg, cloud, log, args):
     cfgin = cfg.get('snappy')
     if not cfgin:
@@ -187,11 +195,12 @@ def handle(name, cfg, cloud, log, args):
                               configs=mycfg['configs'],
                               fspath=mycfg['packages_dir'])
 
+    set_snappy_command()
+
     fails = []
     for pkg_op in pkg_ops:
         try:
-            render_snap_op(op=pkg_op['op'], name=pkg_op['name'],
-                           cfgfile=pkg_op['cfgfile'], config=pkg_op['config'])
+            render_snap_op(**pkg_op)
         except Exception as e:
             fails.append((pkg_op, e,))
             LOG.warn("'%s' failed for '%s': %s",
-- 
cgit v1.2.3


From d0eacf97c72b2613a3f1ce179e284d5aa98744dc Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 26 Mar 2015 22:18:16 -0400
Subject: encode needed for yaml_dumps

---
 cloudinit/config/cc_snappy.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index dbdc402c..adb25bc2 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -101,7 +101,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
             elif isinstance(config, six.text_type):
                 cfg_bytes = config.encode()
             else:
-                cfg_bytes = util.yaml_dumps(config)
+                cfg_bytes = util.yaml_dumps(config).encode()
 
             (fd, cfg_tmpf) = tempfile.mkstemp()
             os.write(fd, cfg_bytes)
-- 
cgit v1.2.3


From ef4a19658f354f1cb52b59c093d38d8448e26a70 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 26 Mar 2015 22:42:26 -0400
Subject: read_pkg_data: return data, fix undefined variable

---
 cloudinit/config/cc_snappy.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index adb25bc2..61c70f03 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -132,6 +132,7 @@ def read_installed_packages():
 
 def read_pkg_data():
     out, err = util.subp([SNAPPY_CMD, "list"])
+    pkg_data = []
     for line in out.splitlines()[1:]:
         toks = line.split(sep=None, maxsplit=3)
         if len(toks) == 3:
@@ -139,7 +140,8 @@ def read_pkg_data():
             dev = None
         else:
             (name, date, version, dev) = toks
-        pkgs.append((name, date, version, dev,))
+        pkg_data.append((name, date, version, dev,))
+    return pkg_data
 
 
 def disable_enable_ssh(enabled):
-- 
cgit v1.2.3


From f32a0c32081a4c38b9738bd65a2efc35f26ee983 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 26 Mar 2015 22:51:59 -0400
Subject: improve doc, change 'click_packages' path to be 'snaps'

---
 cloudinit/config/cc_snappy.py | 45 ++++++++++++++++++++++++++++++-------------
 1 file changed, 32 insertions(+), 13 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 61c70f03..09a8d239 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -1,5 +1,36 @@
 # vi: ts=4 expandtab
 #
+"""
+snappy modules allows configuration of snappy.
+Example config:
+  #cloud-config
+  snappy:
+    system_snappy: auto
+    ssh_enabled: False
+    packages: [etcd, pkg2]
+    configs:
+      pkgname: pkgname-config-blob
+      pkg2: config-blob
+    packages_dir: '/writable/user-data/cloud-init/snaps'
+
+ - ssh_enabled:
+   This defaults to 'False'.  Set to a non-false value to enable ssh service
+ - snap installation and config
+   The above would install 'etcd', and then install 'pkg2' with a
+   '--config=<file>' argument where 'file' as 'config-blob' inside it.
+   If 'pkgname' is installed already, then 'snappy config pkgname <file>'
+   will be called where 'file' has 'pkgname-config-blob' as its content.
+
+   If 'packages_dir' has files in it that end in '.snap', then they are
+   installed.  Given 3 files:
+     <packages_dir>/foo.snap
+     <packages_dir>/foo.config
+     <packages_dir>/bar.snap
+   cloud-init will invoke:
+     snappy install "--config=<packages_dir>/foo.config" \
+         <packages_dir>/foo.snap
+     snappy install <packages_dir>/bar.snap
+"""
 
 from cloudinit import log as logging
 from cloudinit import templater
@@ -18,24 +49,12 @@ SNAPPY_CMD = "snappy"
 
 BUILTIN_CFG = {
     'packages': [],
-    'packages_dir': '/writable/user-data/cloud-init/click_packages',
+    'packages_dir': '/writable/user-data/cloud-init/snaps',
     'ssh_enabled': False,
     'system_snappy': "auto",
     'configs': {},
 }
 
-"""
-snappy:
-  system_snappy: auto
-  ssh_enabled: True
-  packages:
-    - etcd
-    - pkg2
-  configs:
-    pkgname: config-blob
-    pkgname2: config-blob
-"""
-
 
 def get_fs_package_ops(fspath):
     if not fspath:
-- 
cgit v1.2.3


From feab0f913b2c3e98cf5200ea2dd7c19aed347395 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 26 Mar 2015 22:56:43 -0400
Subject: mention ubuntu-core

---
 cloudinit/config/cc_snappy.py | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 09a8d239..de6fae4b 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -30,6 +30,10 @@ Example config:
      snappy install "--config=<packages_dir>/foo.config" \
          <packages_dir>/foo.snap
      snappy install <packages_dir>/bar.snap
+
+   Note, that if provided a 'configs' entry for 'ubuntu-core', then
+   cloud-init will invoke: snappy config ubuntu-core <config>
+   Allowing you to configure ubuntu-core in this way.
 """
 
 from cloudinit import log as logging
-- 
cgit v1.2.3


From b9cdcb6a8ef499c6e3be178fb5f59d369eb3b169 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 26 Mar 2015 23:22:12 -0400
Subject: fix scope so that SNAPPY_CMD is affected by set_snappy_command

---
 cloudinit/config/cc_snappy.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index de6fae4b..e664234a 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -194,11 +194,12 @@ def system_is_snappy():
 
 
 def set_snappy_command():
+    global SNAPPY_CMD
     if util.which("snappy-go"):
-        SNAPPY_COMMAND = "snappy-go"
+        SNAPPY_CMD = "snappy-go"
     else:
-        SNAPPY_COMMAND = "snappy"
-    LOG.debug("snappy command is '%s'", SNAPPY_COMMAND)
+        SNAPPY_CMD = "snappy"
+    LOG.debug("snappy command is '%s'", SNAPPY_CMD)
 
 
 def handle(name, cfg, cloud, log, args):
-- 
cgit v1.2.3


From b7b8004bc58f1243d023092e67f3b78743086ff2 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 27 Mar 2015 11:11:05 -0400
Subject: change 'configs' to 'config', and namespace input to 'snappy config'

the input to 'snappy config <packagename>' is expected to have
config:
  <packagename>:
    content:

So here we pad that input correctly.  Note, that a .config file
on disk is not modified.

Also, we change 'configs' to just be 'config', to be possibly compatible
with a future 'snappy config /' that dumped:
 config:
   pkg1: data1
   pkg2: data2
---
 cloudinit/config/cc_snappy.py                      | 29 +++++-----
 .../unittests/test_handler/test_handler_snappy.py  | 61 ++++++++++++++++------
 2 files changed, 61 insertions(+), 29 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index e664234a..a3af98a6 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -8,9 +8,11 @@ Example config:
     system_snappy: auto
     ssh_enabled: False
     packages: [etcd, pkg2]
-    configs:
-      pkgname: pkgname-config-blob
-      pkg2: config-blob
+    config:
+      pkgname:
+        key2: value2
+      pkg2:
+        key1: value1
     packages_dir: '/writable/user-data/cloud-init/snaps'
 
  - ssh_enabled:
@@ -31,7 +33,7 @@ Example config:
          <packages_dir>/foo.snap
      snappy install <packages_dir>/bar.snap
 
-   Note, that if provided a 'configs' entry for 'ubuntu-core', then
+   Note, that if provided a 'config' entry for 'ubuntu-core', then
    cloud-init will invoke: snappy config ubuntu-core <config>
    Allowing you to configure ubuntu-core in this way.
 """
@@ -56,7 +58,7 @@ BUILTIN_CFG = {
     'packages_dir': '/writable/user-data/cloud-init/snaps',
     'ssh_enabled': False,
     'system_snappy': "auto",
-    'configs': {},
+    'config': {},
 }
 
 
@@ -119,15 +121,14 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
     try:
         cfg_tmpf = None
         if config is not None:
-            if isinstance(config, six.binary_type):
-                cfg_bytes = config
-            elif isinstance(config, six.text_type):
-                cfg_bytes = config.encode()
-            else:
-                cfg_bytes = util.yaml_dumps(config).encode()
-
+            # input to 'snappy config packagename' must have nested data. odd.
+            # config:
+            #   packagename:
+            #      config
+            # Note, however, we do not touch config files on disk.
+            nested_cfg = {'config': {name: config}}
             (fd, cfg_tmpf) = tempfile.mkstemp()
-            os.write(fd, cfg_bytes)
+            os.write(fd, util.yaml_dumps(nested_cfg).encode())
             os.close(fd)
             cfgfile = cfg_tmpf
 
@@ -218,7 +219,7 @@ def handle(name, cfg, cloud, log, args):
         return
 
     pkg_ops = get_package_ops(packages=mycfg['packages'],
-                              configs=mycfg['configs'],
+                              configs=mycfg['config'],
                               fspath=mycfg['packages_dir'])
 
     set_snappy_command()
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
index 81d891d2..f56a22f7 100644
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -6,6 +6,9 @@ from .. import helpers as t_help
 import os
 import shutil
 import tempfile
+import yaml
+
+ALLOWED = (dict, list, int, str)
 
 
 class TestInstallPackages(t_help.TestCase):
@@ -44,7 +47,7 @@ class TestInstallPackages(t_help.TestCase):
                 config = kwargs.get('data', '')
             else:
                 with open(args[3], "rb") as fp:
-                    config = fp.read()
+                    config = yaml.safe_load(fp.read())
             self.snapcmds.append(['config', args[2], config])
         elif args[0:2] == ['snappy', 'install']:
             config = None
@@ -56,7 +59,7 @@ class TestInstallPackages(t_help.TestCase):
                         config = kwargs.get('data', '')
                     elif cfgfile:
                         with open(cfgfile, "rb") as fp:
-                            config = fp.read()
+                            config = yaml.safe_load(fp.read())
                 elif not pkg and not arg.startswith("-"):
                     pkg = arg
             self.snapcmds.append(['install', pkg, config])
@@ -126,7 +129,7 @@ class TestInstallPackages(t_help.TestCase):
                          path='snapf1.snap', cfgfile='snapf1.config')
         render_snap_op(**op)
         self.assertEqual(
-            self.snapcmds, [['install', op['path'], b'snapf1cfg']])
+            self.snapcmds, [['install', op['path'], 'snapf1cfg']])
 
     def test_render_op_snap(self):
         op = makeop('install', 'snapf1')
@@ -135,49 +138,77 @@ class TestInstallPackages(t_help.TestCase):
             self.snapcmds, [['install', 'snapf1', None]])
 
     def test_render_op_snap_config(self):
-        op = makeop('install', 'snapf1', config=b'myconfig')
+        mycfg = {'key1': 'value1'}
+        name = "snapf1"
+        op = makeop('install', name, config=mycfg)
         render_snap_op(**op)
         self.assertEqual(
-            self.snapcmds, [['install', 'snapf1', b'myconfig']])
+            self.snapcmds, [['install', name, {'config': {name: mycfg}}]])
 
     def test_render_op_config_bytes(self):
-        op = makeop('config', 'snapf1', config=b'myconfig')
+        name = "snapf1"
+        mycfg = b'myconfig'
+        op = makeop('config', name, config=mycfg)
         render_snap_op(**op)
         self.assertEqual(
-            self.snapcmds, [['config', 'snapf1', b'myconfig']])
+            self.snapcmds, [['config', 'snapf1', {'config': {name: mycfg}}]])
 
     def test_render_op_config_string(self):
+        name = 'snapf1'
         mycfg = 'myconfig: foo\nhisconfig: bar\n'
-        op = makeop('config', 'snapf1', config=mycfg)
+        op = makeop('config', name, config=mycfg)
         render_snap_op(**op)
         self.assertEqual(
-            self.snapcmds, [['config', 'snapf1', mycfg.encode()]])
+            self.snapcmds, [['config', 'snapf1', {'config': {name: mycfg}}]])
 
     def test_render_op_config_dict(self):
         # config entry for package can be a dict, not a string blob
         mycfg = {'foo': 'bar'}
-        op = makeop('config', 'snapf1', config=mycfg)
+        name = 'snapf1'
+        op = makeop('config', name, config=mycfg)
         render_snap_op(**op)
         # snapcmds is a list of 3-entry lists. data_found will be the
         # blob of data in the file in 'snappy install --config=<file>'
         data_found = self.snapcmds[0][2]
-        self.assertEqual(mycfg, util.load_yaml(data_found))
+        self.assertEqual(mycfg, data_found['config'][name])
 
     def test_render_op_config_list(self):
         # config entry for package can be a list, not a string blob
         mycfg = ['foo', 'bar', 'wark', {'f1': 'b1'}]
-        op = makeop('config', 'snapf1', config=mycfg)
+        name = "snapf1"
+        op = makeop('config', name, config=mycfg)
         render_snap_op(**op)
         data_found = self.snapcmds[0][2]
-        self.assertEqual(mycfg, util.load_yaml(data_found, allowed=(list,)))
+        self.assertEqual(mycfg, data_found['config'][name])
 
     def test_render_op_config_int(self):
         # config entry for package can be a list, not a string blob
         mycfg = 1
-        op = makeop('config', 'snapf1', config=mycfg)
+        name = 'snapf1'
+        op = makeop('config', name, config=mycfg)
         render_snap_op(**op)
         data_found = self.snapcmds[0][2]
-        self.assertEqual(mycfg, util.load_yaml(data_found, allowed=(int,)))
+        self.assertEqual(mycfg, data_found['config'][name])
+
+    def test_render_does_not_pad_cfgfile(self):
+        # package_ops with cfgfile should not modify --file= content.
+        mydata = "foo1: bar1\nk: [l1, l2, l3]\n"
+        self.populate_tmp(
+            {"snapf1.snap": b"foo1", "snapf1.config": mydata.encode()})
+        ret = get_package_ops(
+            packages=[], configs={}, installed=[], fspath=self.tmp)
+        self.assertEqual(
+            ret,
+            [makeop_tmpd(self.tmp, 'install', 'snapf1', path="snapf1.snap",
+                         cfgfile="snapf1.config")])
+
+        # now the op was ok, but test that render didn't mess it up.
+        render_snap_op(**ret[0])
+        data_found = self.snapcmds[0][2]
+        # the data found gets loaded in the snapcmd interpretation
+        # so this comparison is a bit lossy, but input to snappy config
+        # is expected to be yaml loadable, so it should be OK.
+        self.assertEqual(yaml.safe_load(mydata), data_found)
 
 
 def makeop_tmpd(tmpd, op, name, config=None, path=None, cfgfile=None):
-- 
cgit v1.2.3


From 6c48673245225c5530c7cc08f5ab82794c708f71 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 27 Mar 2015 11:33:58 -0400
Subject: set snappy command earlier

---
 cloudinit/config/cc_snappy.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index a3af98a6..f237feef 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -218,12 +218,12 @@ def handle(name, cfg, cloud, log, args):
         LOG.debug("%s: 'auto' mode, and system not snappy", name)
         return
 
+    set_snappy_command()
+
     pkg_ops = get_package_ops(packages=mycfg['packages'],
                               configs=mycfg['config'],
                               fspath=mycfg['packages_dir'])
 
-    set_snappy_command()
-
     fails = []
     for pkg_op in pkg_ops:
         try:
-- 
cgit v1.2.3


From b4989280d7285f214c1016efa36a20ad57821d6b Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 27 Mar 2015 14:19:56 -0400
Subject: address namespacing

---
 cloudinit/config/cc_snappy.py                      | 52 +++++++++++++++++-----
 .../unittests/test_handler/test_handler_snappy.py  | 46 +++++++++++++++++++
 2 files changed, 88 insertions(+), 10 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index f237feef..f8f67e1f 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -7,7 +7,7 @@ Example config:
   snappy:
     system_snappy: auto
     ssh_enabled: False
-    packages: [etcd, pkg2]
+    packages: [etcd, pkg2.smoser]
     config:
       pkgname:
         key2: value2
@@ -18,11 +18,15 @@ Example config:
  - ssh_enabled:
    This defaults to 'False'.  Set to a non-false value to enable ssh service
  - snap installation and config
-   The above would install 'etcd', and then install 'pkg2' with a
+   The above would install 'etcd', and then install 'pkg2.smoser' with a
    '--config=<file>' argument where 'file' as 'config-blob' inside it.
    If 'pkgname' is installed already, then 'snappy config pkgname <file>'
    will be called where 'file' has 'pkgname-config-blob' as its content.
 
+   Entries in 'config' can be namespaced or non-namespaced for a package.
+   In either case, the config provided to snappy command is non-namespaced.
+   The package name is provided as it appears.
+
    If 'packages_dir' has files in it that end in '.snap', then they are
    installed.  Given 3 files:
      <packages_dir>/foo.snap
@@ -52,6 +56,7 @@ LOG = logging.getLogger(__name__)
 
 frequency = PER_INSTANCE
 SNAPPY_CMD = "snappy"
+NAMESPACE_DELIM = '.'
 
 BUILTIN_CFG = {
     'packages': [],
@@ -81,10 +86,20 @@ def makeop(op, name, config=None, path=None, cfgfile=None):
             'cfgfile': cfgfile})
 
 
+def get_package_config(configs, name):
+    # load the package's config from the configs dict.
+    # prefer full-name entry (config-example.canonical) 
+    # over short name entry (config-example)
+    if name in configs:
+        return configs[name]
+    return configs.get(name.partition(NAMESPACE_DELIM)[0])
+
+
 def get_package_ops(packages, configs, installed=None, fspath=None):
     # get the install an config operations that should be done
     if installed is None:
         installed = read_installed_packages()
+    short_installed = [p.partition(NAMESPACE_DELIM)[0] for p in installed]
 
     if not packages:
         packages = []
@@ -95,21 +110,31 @@ def get_package_ops(packages, configs, installed=None, fspath=None):
     ops += get_fs_package_ops(fspath)
 
     for name in packages:
-        ops.append(makeop('install', name, configs.get('name')))
+        ops.append(makeop('install', name, get_package_config(configs, name)))
 
     to_install = [f['name'] for f in ops]
+    short_to_install = [f['name'].partition(NAMESPACE_DELIM)[0] for f in ops]
 
     for name in configs:
-        if name in installed and name not in to_install:
-            ops.append(makeop('config', name, config=configs[name]))
+        if name in to_install:
+            continue
+        shortname = name.partition(NAMESPACE_DELIM)[0]
+        if shortname in short_to_install:
+            continue
+        if name in installed or shortname in short_installed:
+            ops.append(makeop('config', name,
+                              config=get_package_config(configs, name)))
 
     # prefer config entries to filepath entries
     for op in ops:
+        if op['op'] != 'install' or not op['cfgfile']:
+            continue
         name = op['name']
-        if name in configs and op['op'] == 'install' and 'cfgfile' in op:
-            LOG.debug("preferring configs[%s] over '%s'", name, op['cfgfile'])
+        fromcfg = get_package_config(configs, op['name'])
+        if fromcfg:
+            LOG.debug("preferring configs[%(name)s] over '%(cfgfile)s'", op)
             op['cfgfile'] = None
-            op['config'] = configs[op['name']]
+            op['config'] = fromcfg
 
     return ops
 
@@ -118,6 +143,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
     if op not in ('install', 'config'):
         raise ValueError("cannot render op '%s'" % op)
 
+    shortname = name.partition(NAMESPACE_DELIM)[0]
     try:
         cfg_tmpf = None
         if config is not None:
@@ -126,7 +152,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
             #   packagename:
             #      config
             # Note, however, we do not touch config files on disk.
-            nested_cfg = {'config': {name: config}}
+            nested_cfg = {'config': {shortname: config}}
             (fd, cfg_tmpf) = tempfile.mkstemp()
             os.write(fd, util.yaml_dumps(nested_cfg).encode())
             os.close(fd)
@@ -151,7 +177,13 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
 
 
 def read_installed_packages():
-    return [p[0] for p in read_pkg_data()]
+    ret = []
+    for (name, date, version, dev) in read_pkg_data():
+        if dev:
+            ret.append(NAMESPACE_DELIM.join(name, dev))
+        else:
+            ret.append(name)
+    return ret
 
 
 def read_pkg_data():
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
index f56a22f7..f0776259 100644
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -90,6 +90,15 @@ class TestInstallPackages(t_help.TestCase):
                   makeop('install', 'pkg2', b'mycfg2'),
                   makeop('config', 'xinstalled', b'xcfg')])
 
+    def test_package_ops_install_long_config_short(self):
+        # a package can be installed by full name, but have config by short
+        cfg = {'k1': 'k2'}
+        ret = get_package_ops(
+            packages=['config-example.canonical'],
+            configs={'config-example': cfg}, installed=[])
+        self.assertEqual(
+            ret, [makeop('install', 'config-example.canonical', cfg)])
+
     def test_package_ops_with_file(self):
         self.populate_tmp(
             {"snapf1.snap": b"foo1", "snapf1.config": b"snapf1cfg",
@@ -114,6 +123,34 @@ class TestInstallPackages(t_help.TestCase):
             ret, [makeop_tmpd(self.tmp, 'install', 'snapf1',
                               path="snapf1.snap", config="snapf1cfg-config")])
 
+    def test_package_ops_namespacing(self):
+        cfgs = {
+            'config-example': {'k1': 'v1'},
+            'pkg1': {'p1': 'p2'},
+            'ubuntu-core': {'c1': 'c2'},
+            'notinstalled.smoser': {'s1': 's2'},
+        }
+        cfg = {'config-example-k1': 'config-example-k2'}
+        ret = get_package_ops(
+            packages=['config-example.canonical'], configs=cfgs,
+            installed=['config-example.smoser', 'pkg1.canonical',
+                       'ubuntu-core'])
+
+        expected_configs = [
+            makeop('config', 'pkg1', config=cfgs['pkg1']),
+            makeop('config', 'ubuntu-core', config=cfgs['ubuntu-core'])]
+        expected_installs = [
+            makeop('install', 'config-example.canonical',
+                   config=cfgs['config-example'])]
+
+        installs = [i for i in ret if i['op'] == 'install']
+        configs = [c for c in ret if c['op'] == 'config']
+
+        self.assertEqual(installs, expected_installs)
+        # configs are not ordered
+        self.assertEqual(len(configs), len(expected_configs))
+        self.assertTrue(all(found in expected_configs for found in configs))
+
     def test_render_op_localsnap(self):
         self.populate_tmp({"snapf1.snap": b"foo1"})
         op = makeop_tmpd(self.tmp, 'install', 'snapf1',
@@ -190,6 +227,15 @@ class TestInstallPackages(t_help.TestCase):
         data_found = self.snapcmds[0][2]
         self.assertEqual(mycfg, data_found['config'][name])
 
+    def test_render_long_configs_short(self):
+        # install a namespaced package should have un-namespaced config
+        mycfg = {'k1': 'k2'}
+        name = 'snapf1'
+        op = makeop('install', name + ".smoser", config=mycfg)
+        render_snap_op(**op)
+        data_found = self.snapcmds[0][2]
+        self.assertEqual(mycfg, data_found['config'][name])
+
     def test_render_does_not_pad_cfgfile(self):
         # package_ops with cfgfile should not modify --file= content.
         mydata = "foo1: bar1\nk: [l1, l2, l3]\n"
-- 
cgit v1.2.3


From 25a05c3367e024fcee5da0b4f15b5ca599dd92f2 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 27 Mar 2015 14:31:06 -0400
Subject: fix read_install

---
 cloudinit/config/cc_snappy.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index f8f67e1f..d9dd9771 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -180,7 +180,7 @@ def read_installed_packages():
     ret = []
     for (name, date, version, dev) in read_pkg_data():
         if dev:
-            ret.append(NAMESPACE_DELIM.join(name, dev))
+            ret.append(NAMESPACE_DELIM.join([name, dev]))
         else:
             ret.append(name)
     return ret
-- 
cgit v1.2.3


From bf52085a1fa3529329a5c48097a12a6e9b93eb22 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 27 Mar 2015 15:19:51 -0400
Subject: NoCloud: the local portion of NoCloud incorrectly claimed datasources

The intent has always been for the local datasource (NoCloud) to require
the provider of metadata to provide 'dsmode=local'.  If that wasn't found,
then the default 'dsmode' would be 'net', and the NoCloudNet datasource
would then find the data.

The bug here was that the default 'net' wasn't being set when
data was found on a local source.
---
 ChangeLog                              | 1 +
 cloudinit/sources/DataSourceNoCloud.py | 5 +++--
 2 files changed, 4 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index 32a4f5d6..70ba9ae3 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -28,6 +28,7 @@
    (LP: #1422388)
  - readurl, read_file_or_url returns bytes, user must convert as necessary
  - SmartOS: use v2 metadata service (LP: #1436417) [Daniel Watkins]
+ - NoCloud: fix local datasource claiming found without explicit dsmode
 0.7.6:
  - open 0.7.6
  - Enable vendordata on CloudSigma datasource (LP: #1303986)
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index c26a645c..6a861af3 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -124,7 +124,7 @@ class DataSourceNoCloud(sources.DataSource):
                     # that is more likely to be what is desired.  If they want
                     # dsmode of local, then they must specify that.
                     if 'dsmode' not in mydata['meta-data']:
-                        mydata['dsmode'] = "net"
+                        mydata['meta-data']['dsmode'] = "net"
 
                     LOG.debug("Using data from %s", dev)
                     found.append(dev)
@@ -193,7 +193,8 @@ class DataSourceNoCloud(sources.DataSource):
             self.vendordata = mydata['vendor-data']
             return True
 
-        LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
+        LOG.debug("%s: not claiming datasource, dsmode=%s", self,
+                  mydata['meta-data']['dsmode'])
         return False
 
 
-- 
cgit v1.2.3


From 09cc5909e3d69c03622b7dc2c4cb35fd378743cb Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 27 Mar 2015 16:20:05 -0400
Subject: be more user-friendly when looking for matching .config

On fspath installs, look for .config files harder.
Given a file named:
  pkg.namespace_0.version_arch.snap
We'll search for config files named:
  pkg.namespace_0.version_arch.config
  pkg.namespace.config
  pkg.config
---
 cloudinit/config/cc_snappy.py                      | 21 +++++++++----
 .../unittests/test_handler/test_handler_snappy.py  | 34 ++++++++++++++++++++++
 2 files changed, 50 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index d9dd9771..74ae3ac0 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -67,15 +67,26 @@ BUILTIN_CFG = {
 }
 
 
+def parse_filename(fname):
+    fname = os.path.basename(fname)
+    fname_noext = fname.rpartition(".")[0]
+    name = fname_noext.partition("_")[0]
+    shortname = name.partition(".")[0]
+    return(name, shortname, fname_noext)
+
+
 def get_fs_package_ops(fspath):
     if not fspath:
         return []
     ops = []
-    for snapfile in glob.glob(os.path.sep.join([fspath, '*.snap'])):
-        cfg = snapfile.rpartition(".")[0] + ".config"
-        name = os.path.basename(snapfile).rpartition(".")[0]
-        if not os.path.isfile(cfg):
-            cfg = None
+    for snapfile in sorted(glob.glob(os.path.sep.join([fspath, '*.snap']))):
+        (name, shortname, fname_noext) = parse_filename(snapfile)
+        cfg = None
+        for cand in set((fname_noext, name, shortname,)):
+            fpcand = os.path.sep.join([fspath, cand]) + ".config"
+            if os.path.isfile(fpcand):
+                cfg = fpcand
+                break
         ops.append(makeop('install', name, config=None,
                    path=snapfile, cfgfile=cfg))
     return ops
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
index f0776259..84512846 100644
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -112,6 +112,33 @@ class TestInstallPackages(t_help.TestCase):
             makeop_tmpd(self.tmp, 'install', 'snapf2', path="snapf2.snap"),
             makeop('install', 'pkg1')])
 
+    def test_package_ops_common_filename(self):
+        # fish package name from filename
+        # package names likely look like: pkgname.namespace_version_arch.snap
+        fname = "xkcd-webserver.canonical_0.3.4_all.snap"
+        name = "xkcd-webserver.canonical"
+        shortname = "xkcd-webserver"
+
+        # find filenames
+        self.populate_tmp(
+            {"pkg-ws.smoser_0.3.4_all.snap": "pkg-ws-snapdata",
+             "pkg-ws.config": "pkg-ws-config",
+             "pkg1.smoser_1.2.3_all.snap": "pkg1.snapdata",
+             "pkg1.smoser.config": "pkg1.smoser.config-data",
+             "pkg1.config": "pkg1.config-data",
+             "pkg2.smoser_0.0_amd64.snap": "pkg2-snapdata",
+             "pkg2.smoser_0.0_amd64.config": "pkg2.config",
+            })
+
+        ret = get_package_ops(
+            packages=[], configs={}, installed=[], fspath=self.tmp)
+        self.assertEqual(
+            ret,
+            [makeop_tmpd(self.tmp, 'install', 'pkg-ws.smoser',
+                         path="pkg-ws.smoser_0.3.4_all.snap",
+                         cfgfile="pkg-ws.config"),
+             makeop_tmpd(self.tmp, 'install', 'pkg1.smoser',
+                         path="pkg1.smoser_1.2.3_all.snap",
+                         cfgfile="pkg1.smoser.config"),
+             makeop_tmpd(self.tmp, 'install', 'pkg2.smoser',
+                         path="pkg2.smoser_0.0_amd64.snap",
+                         cfgfile="pkg2.smoser_0.0_amd64.config"),
+             ])
+
     def test_package_ops_config_overrides_file(self):
         # config data overrides local file .config
         self.populate_tmp(
-- 
cgit v1.2.3


From 6f738bea5d2aa29cdf14d0dc2a6e880517ab2bc2 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 27 Mar 2015 16:27:47 -0400
Subject: do not use set

---
 cloudinit/config/cc_snappy.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 74ae3ac0..05676321 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -82,7 +82,7 @@ def get_fs_package_ops(fspath):
     for snapfile in sorted(glob.glob(os.path.sep.join([fspath, '*.snap']))):
         (name, shortname, fname_noext) = parse_filename(snapfile)
         cfg = None
-        for cand in set((fname_noext, name, shortname,)):
+        for cand in (fname_noext, name, shortname):
             fpcand = os.path.sep.join([fspath, cand]) + ".config"
             if os.path.isfile(fpcand):
                 cfg = fpcand
-- 
cgit v1.2.3


From 522a146eadcdb30e68acaaf792c391a7f1da3151 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 27 Mar 2015 17:03:59 -0400
Subject: allow-unauthenticated when done from local file

---
 cloudinit/config/cc_snappy.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 05676321..131ee7ea 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -174,6 +174,7 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
             if cfgfile:
                 cmd.append('--config=' + cfgfile)
             if path:
+                cmd.append("--allow-unauthenticated")
                 cmd.append(path)
             else:
                 cmd.append(name)
-- 
cgit v1.2.3


From 8165000c3975db07cb5b8b29410635dd6c9345bd Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 31 Mar 2015 14:20:00 -0400
Subject: adjust cc_snappy for snappy install package with config.

It was believed that to install a package with config the command was:
  snappy install --config=config-file <package>
Instead, what was implemented in snappy was:
  snappy install <package> [<config-file>]

This modifies cloud-init to invoke the latter and changes the tests
appropriately.

LP: #1438836
---
 cloudinit/config/cc_snappy.py                       |  9 ++++-----
 tests/unittests/test_handler/test_handler_snappy.py | 10 ++++++----
 2 files changed, 10 insertions(+), 9 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 131ee7ea..6a7ae09b 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -19,7 +19,7 @@ Example config:
    This defaults to 'False'.  Set to a non-false value to enable ssh service
  - snap installation and config
    The above would install 'etcd', and then install 'pkg2.smoser' with a
-   '--config=<file>' argument where 'file' as 'config-blob' inside it.
+   '<config-file>' argument where 'config-file' has 'config-blob' inside it.
    If 'pkgname' is installed already, then 'snappy config pkgname <file>'
    will be called where 'file' has 'pkgname-config-blob' as its content.
 
@@ -33,8 +33,7 @@ Example config:
      <packages_dir>/foo.config
      <packages_dir>/bar.snap
    cloud-init will invoke:
-     snappy install "--config=<packages_dir>/foo.config" \
-         <packages_dir>/foo.snap
+     snappy install <packages_dir>/foo.snap <packages_dir>/foo.config
      snappy install <packages_dir>/bar.snap
 
    Note, that if provided a 'config' entry for 'ubuntu-core', then
@@ -171,13 +170,13 @@ def render_snap_op(op, name, path=None, cfgfile=None, config=None):
 
         cmd = [SNAPPY_CMD, op]
         if op == 'install':
-            if cfgfile:
-                cmd.append('--config=' + cfgfile)
             if path:
                 cmd.append("--allow-unauthenticated")
                 cmd.append(path)
             else:
                 cmd.append(name)
+            if cfgfile:
+                cmd.append(cfgfile)
         elif op == 'config':
             cmd += [name, cfgfile]
 
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
index 8effd99d..f3109bac 100644
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -53,15 +53,17 @@ class TestInstallPackages(t_help.TestCase):
             config = None
             pkg = None
             for arg in args[2:]:
-                if arg.startswith("--config="):
-                    cfgfile = arg.partition("=")[2]
+                if arg.startswith("-"):
+                    continue
+                if not pkg:
+                    pkg = arg
+                elif not config:
+                    cfgfile = arg
                     if cfgfile == "-":
                         config = kwargs.get('data', '')
                     elif cfgfile:
                         with open(cfgfile, "rb") as fp:
                             config = yaml.safe_load(fp.read())
-                elif not pkg and not arg.startswith("-"):
-                    pkg = arg
             self.snapcmds.append(['install', pkg, config])
 
     def test_package_ops_1(self):
-- 
cgit v1.2.3


From b05d0d54d29553e1fe1961ccc64da7d0b45016dd Mon Sep 17 00:00:00 2001
From: Gerhard Muntingh <gerhard@qux.nl>
Date: Tue, 14 Apr 2015 15:20:39 +0200
Subject: Add functionality to fixate the uid of a newly added user.

---
 cloudinit/distros/__init__.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index ab874b45..b297c78b 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -318,6 +318,7 @@ class Distro(object):
             "gecos": '--comment',
             "homedir": '--home',
             "primary_group": '--gid',
+            "uid": '--uid',
             "groups": '--groups',
             "passwd": '--password',
             "shell": '--shell',
-- 
cgit v1.2.3


From b6060efa4bd1de7f49f6aca3e97cfe77947f3a93 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 15 Apr 2015 12:13:17 +0100
Subject: Add unit tests for Azure hostname bouncing.

Including minor refactoring to make mocking considerably easier.
---
 cloudinit/sources/DataSourceAzure.py          |  28 ++--
 tests/unittests/test_datasource/test_azure.py | 186 +++++++++++++++++++-------
 2 files changed, 161 insertions(+), 53 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 6e030217..d4211fc4 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -66,6 +66,14 @@ DS_CFG_PATH = ['datasource', DS_NAME]
 DEF_EPHEMERAL_LABEL = 'Temporary Storage'
 
 
+def get_hostname(hostname_command='hostname'):
+    return util.subp(hostname_command, capture=True)[0].strip()
+
+
+def set_hostname(hostname, hostname_command='hostname'):
+    util.subp([hostname_command, hostname])
+
+
 class DataSourceAzureNet(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -313,13 +321,22 @@ def handle_set_hostname(enabled, hostname, cfg):
                           hostname_command=cfg['hostname_command'])
 
 
+def perform_hostname_bounce(command, env):
+    shell = not isinstance(command, (list, tuple))
+    # capture=False, see comments in bug 1202758 and bug 1206164.
+    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
+                  get_uptime=True, func=util.subp,
+                  kwargs={'args': command, 'shell': shell, 'capture': False,
+                          'env': env})
+
+
 def apply_hostname_bounce(hostname, policy, interface, command,
                           hostname_command="hostname"):
     # set the hostname to 'hostname' if it is not already set to that.
     # then, if policy is not off, bounce the interface using command
-    prev_hostname = util.subp(hostname_command, capture=True)[0].strip()
+    prev_hostname = get_hostname()
 
-    util.subp([hostname_command, hostname])
+    set_hostname(hostname, hostname_command)
 
     msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
            (prev_hostname, hostname, policy, interface))
@@ -341,12 +358,7 @@ def apply_hostname_bounce(hostname, policy, interface, command,
         command = BOUNCE_COMMAND
 
     LOG.debug("pubhname: publishing hostname [%s]", msg)
-    shell = not isinstance(command, (list, tuple))
-    # capture=False, see comments in bug 1202758 and bug 1206164.
-    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
-        get_uptime=True, func=util.subp,
-        kwargs={'args': command, 'shell': shell, 'capture': False,
-                'env': env})
+    perform_hostname_bounce(command, env)
 
 
 def crtfile_to_pubkey(fname):
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 8112c69b..3adf9bdf 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -116,9 +116,6 @@ class TestAzureDataSource(TestCase):
             data['iid_from_shared_cfg'] = path
             return 'i-my-azure-id'
 
-        def _apply_hostname_bounce(**kwargs):
-            data['apply_hostname_bounce'] = kwargs
-
         if data.get('ovfcontent') is not None:
             populate_dir(os.path.join(self.paths.seed_dir, "azure"),
                          {'ovf-env.xml': data['ovfcontent']})
@@ -132,7 +129,9 @@ class TestAzureDataSource(TestCase):
             (mod, 'wait_for_files', _wait_for_files),
             (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
             (mod, 'iid_from_shared_config', _iid_from_shared_config),
-            (mod, 'apply_hostname_bounce', _apply_hostname_bounce),
+            (mod, 'perform_hostname_bounce', mock.MagicMock()),
+            (mod, 'get_hostname', mock.MagicMock()),
+            (mod, 'set_hostname', mock.MagicMock()),
             ])
 
         dsrc = mod.DataSourceAzureNet(
@@ -272,47 +271,6 @@ class TestAzureDataSource(TestCase):
         for mypk in mypklist:
             self.assertIn(mypk, dsrc.cfg['_pubkeys'])
 
-    def test_disabled_bounce(self):
-        pass
-
-    def test_apply_bounce_call_1(self):
-        # hostname needs to get through to apply_hostname_bounce
-        odata = {'HostName': 'my-random-hostname'}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
-        self._get_ds(data).get_data()
-        self.assertIn('hostname', data['apply_hostname_bounce'])
-        self.assertEqual(data['apply_hostname_bounce']['hostname'],
-                         odata['HostName'])
-
-    def test_apply_bounce_call_configurable(self):
-        # hostname_bounce should be configurable in datasource cfg
-        cfg = {'hostname_bounce': {'interface': 'eth1', 'policy': 'off',
-                                   'command': 'my-bounce-command',
-                                   'hostname_command': 'my-hostname-command'}}
-        odata = {'HostName': "xhost",
-                'dscfg': {'text': b64e(yaml.dump(cfg)),
-                          'encoding': 'base64'}}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-        self._get_ds(data).get_data()
-
-        for k in cfg['hostname_bounce']:
-            self.assertIn(k, data['apply_hostname_bounce'])
-
-        for k, v in cfg['hostname_bounce'].items():
-            self.assertEqual(data['apply_hostname_bounce'][k], v)
-
-    def test_set_hostname_disabled(self):
-        # config specifying set_hostname off should not bounce
-        cfg = {'set_hostname': False}
-        odata = {'HostName': "xhost",
-                'dscfg': {'text': b64e(yaml.dump(cfg)),
-                          'encoding': 'base64'}}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-        self._get_ds(data).get_data()
-
-        self.assertEqual(data.get('apply_hostname_bounce', "N/A"), "N/A")
-
     def test_default_ephemeral(self):
         # make sure the ephemeral device works
         odata = {}
@@ -425,6 +383,144 @@ class TestAzureDataSource(TestCase):
             load_file(os.path.join(self.waagent_d, 'ovf-env.xml')))
 
 
+class TestAzureBounce(TestCase):
+
+    def mock_out_azure_moving_parts(self):
+        self.patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'invoke_agent'))
+        self.patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'wait_for_files'))
+        self.patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'iid_from_shared_config',
+                              mock.MagicMock(return_value='i-my-azure-id')))
+        self.patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs',
+                              mock.MagicMock(return_value=[])))
+        self.patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'find_ephemeral_disk',
+                              mock.MagicMock(return_value=None)))
+        self.patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'find_ephemeral_part',
+                              mock.MagicMock(return_value=None)))
+
+    def setUp(self):
+        super(TestAzureBounce, self).setUp()
+        self.tmp = tempfile.mkdtemp()
+        self.waagent_d = os.path.join(self.tmp, 'var', 'lib', 'waagent')
+        self.paths = helpers.Paths({'cloud_dir': self.tmp})
+        self.addCleanup(shutil.rmtree, self.tmp)
+        DataSourceAzure.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
+        self.patches = ExitStack()
+        self.mock_out_azure_moving_parts()
+        self.get_hostname = self.patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'get_hostname'))
+        self.set_hostname = self.patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'set_hostname'))
+        self.subp = self.patches.enter_context(
+            mock.patch('cloudinit.sources.DataSourceAzure.util.subp'))
+
+    def tearDown(self):
+        self.patches.close()
+
+    def _get_ds(self, ovfcontent=None):
+        if ovfcontent is not None:
+            populate_dir(os.path.join(self.paths.seed_dir, "azure"),
+                         {'ovf-env.xml': ovfcontent})
+        return DataSourceAzure.DataSourceAzureNet(
+            {}, distro=None, paths=self.paths)
+
+    def get_ovf_env_with_dscfg(self, hostname, cfg):
+        odata = {
+            'HostName': hostname,
+            'dscfg': {
+                'text': b64e(yaml.dump(cfg)),
+                'encoding': 'base64'
+            }
+        }
+        return construct_valid_ovf_env(data=odata)
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+    def test_disabled_bounce_does_not_perform_bounce(
+            self, perform_hostname_bounce):
+        cfg = {'hostname_bounce': {'policy': 'off'}}
+        self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data()
+        self.assertEqual(0, perform_hostname_bounce.call_count)
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+    def test_unchanged_hostname_does_not_perform_bounce(
+            self, perform_hostname_bounce):
+        host_name = 'unchanged-host-name'
+        self.get_hostname.return_value = host_name
+        cfg = {'hostname_bounce': {'policy': 'yes'}}
+        self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data()
+        self.assertEqual(0, perform_hostname_bounce.call_count)
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+    def test_force_performs_bounce_regardless(self, perform_hostname_bounce):
+        host_name = 'unchanged-host-name'
+        self.get_hostname.return_value = host_name
+        cfg = {'hostname_bounce': {'policy': 'force'}}
+        self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data()
+        self.assertEqual(1, perform_hostname_bounce.call_count)
+
+    def test_different_hostnames_sets_hostname(self):
+        expected_hostname = 'azure-expected-host-name'
+        self.get_hostname.return_value = 'default-host-name'
+        self._get_ds(
+            self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data()
+        self.assertEqual(expected_hostname,
+                         self.set_hostname.call_args_list[0][0][0])
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+    def test_different_hostnames_performs_bounce(
+            self, perform_hostname_bounce):
+        expected_hostname = 'azure-expected-host-name'
+        self.get_hostname.return_value = 'default-host-name'
+        self._get_ds(
+            self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data()
+        self.assertEqual(1, perform_hostname_bounce.call_count)
+
+    def test_environment_correct_for_bounce_command(self):
+        interface = 'int0'
+        hostname = 'my-new-host'
+        old_hostname = 'my-old-host'
+        self.get_hostname.return_value = old_hostname
+        cfg = {'hostname_bounce': {'interface': interface, 'policy': 'force'}}
+        data = self.get_ovf_env_with_dscfg(hostname, cfg)
+        self._get_ds(data).get_data()
+        self.assertEqual(1, self.subp.call_count)
+        bounce_env = self.subp.call_args[1]['env']
+        self.assertEqual(interface, bounce_env['interface'])
+        self.assertEqual(hostname, bounce_env['hostname'])
+        self.assertEqual(old_hostname, bounce_env['old_hostname'])
+
+    def test_default_bounce_command_used_by_default(self):
+        cmd = 'default-bounce-command'
+        DataSourceAzure.BUILTIN_DS_CONFIG['hostname_bounce']['command'] = cmd
+        cfg = {'hostname_bounce': {'policy': 'force'}}
+        data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
+        self._get_ds(data).get_data()
+        self.assertEqual(1, self.subp.call_count)
+        bounce_args = self.subp.call_args[1]['args']
+        self.assertEqual(cmd, bounce_args)
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+    def test_set_hostname_option_can_disable_bounce(
+            self, perform_hostname_bounce):
+        cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
+        data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
+        self._get_ds(data).get_data()
+
+        self.assertEqual(0, perform_hostname_bounce.call_count)
+
+    def test_set_hostname_option_can_disable_hostname_set(self):
+        cfg = {'set_hostname': False, 'hostname_bounce': {'policy': 'force'}}
+        data = self.get_ovf_env_with_dscfg('some-hostname', cfg)
+        self._get_ds(data).get_data()
+
+        self.assertEqual(0, self.set_hostname.call_count)
+
+
 class TestReadAzureOvf(TestCase):
     def test_invalid_xml_raises_non_azure_ds(self):
         invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
-- 
cgit v1.2.3


From b8706d7dc930c5c9dce1f96a000c66e5dda14e02 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 15 Apr 2015 12:13:17 +0100
Subject: Reset host name after bounce has allowed walinuxagent to run
 successfully.

---
 cloudinit/sources/DataSourceAzure.py          | 134 +++++++++++++-------------
 tests/unittests/test_datasource/test_azure.py |  31 ++++++
 2 files changed, 99 insertions(+), 66 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index d4211fc4..a19d9ca2 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -17,6 +17,7 @@
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import base64
+import contextlib
 import crypt
 import fnmatch
 import os
@@ -74,6 +75,28 @@ def set_hostname(hostname, hostname_command='hostname'):
     util.subp([hostname_command, hostname])
 
 
+@contextlib.contextmanager
+def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
+    """
+    Set a temporary hostname, restoring the previous hostname on exit.
+
+    Will have the value of the previous hostname when used as a context
+    manager, or None if the hostname was not changed.
+    """
+    policy = cfg['hostname_bounce']['policy']
+    previous_hostname = get_hostname(hostname_command)
+    if (not util.is_true(cfg.get('set_hostname'))
+            or util.is_false(policy)
+            or (previous_hostname == temp_hostname and policy != 'force')):
+        yield None
+        return
+    set_hostname(temp_hostname, hostname_command)
+    try:
+        yield previous_hostname
+    finally:
+        set_hostname(previous_hostname, hostname_command)
+
+
 class DataSourceAzureNet(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -162,33 +185,40 @@ class DataSourceAzureNet(sources.DataSource):
         # the directory to be protected.
         write_files(ddir, files, dirmode=0o700)
 
-        # handle the hostname 'publishing'
-        try:
-            handle_set_hostname(mycfg.get('set_hostname'),
-                                self.metadata.get('local-hostname'),
-                                mycfg['hostname_bounce'])
-        except Exception as e:
-            LOG.warn("Failed publishing hostname: %s", e)
-            util.logexc(LOG, "handling set_hostname failed")
-
-        try:
-            invoke_agent(mycfg['agent_command'])
-        except util.ProcessExecutionError:
-            # claim the datasource even if the command failed
-            util.logexc(LOG, "agent command '%s' failed.",
-                        mycfg['agent_command'])
-
-        shcfgxml = os.path.join(ddir, "SharedConfig.xml")
-        wait_for = [shcfgxml]
-
-        fp_files = []
-        for pk in self.cfg.get('_pubkeys', []):
-            bname = str(pk['fingerprint'] + ".crt")
-            fp_files += [os.path.join(ddir, bname)]
+        temp_hostname = self.metadata.get('local-hostname')
+        hostname_command = mycfg['hostname_bounce']['hostname_command']
+        with temporary_hostname(temp_hostname, mycfg,
+                                hostname_command=hostname_command) \
+                as previous_hostname:
+            if (previous_hostname is not None
+                    and util.is_true(mycfg.get('set_hostname'))):
+                cfg = mycfg['hostname_bounce']
+                try:
+                    perform_hostname_bounce(hostname=temp_hostname,
+                                            cfg=cfg,
+                                            prev_hostname=previous_hostname)
+                except Exception as e:
+                    LOG.warn("Failed publishing hostname: %s", e)
+                    util.logexc(LOG, "handling set_hostname failed")
 
-        missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
-                                func=wait_for_files,
-                                args=(wait_for + fp_files,))
+            try:
+                invoke_agent(mycfg['agent_command'])
+            except util.ProcessExecutionError:
+                # claim the datasource even if the command failed
+                util.logexc(LOG, "agent command '%s' failed.",
+                            mycfg['agent_command'])
+
+            shcfgxml = os.path.join(ddir, "SharedConfig.xml")
+            wait_for = [shcfgxml]
+
+            fp_files = []
+            for pk in self.cfg.get('_pubkeys', []):
+                bname = str(pk['fingerprint'] + ".crt")
+                fp_files += [os.path.join(ddir, bname)]
+
+            missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+                                    func=wait_for_files,
+                                    args=(wait_for + fp_files,))
         if len(missing):
             LOG.warn("Did not find files, but going on: %s", missing)
 
@@ -307,48 +337,15 @@ def support_new_ephemeral(cfg):
     return mod_list
 
 
-def handle_set_hostname(enabled, hostname, cfg):
-    if not util.is_true(enabled):
-        return
-
-    if not hostname:
-        LOG.warn("set_hostname was true but no local-hostname")
-        return
-
-    apply_hostname_bounce(hostname=hostname, policy=cfg['policy'],
-                          interface=cfg['interface'],
-                          command=cfg['command'],
-                          hostname_command=cfg['hostname_command'])
-
-
-def perform_hostname_bounce(command, env):
-    shell = not isinstance(command, (list, tuple))
-    # capture=False, see comments in bug 1202758 and bug 1206164.
-    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
-                  get_uptime=True, func=util.subp,
-                  kwargs={'args': command, 'shell': shell, 'capture': False,
-                          'env': env})
-
-
-def apply_hostname_bounce(hostname, policy, interface, command,
-                          hostname_command="hostname"):
+def perform_hostname_bounce(hostname, cfg, prev_hostname):
     # set the hostname to 'hostname' if it is not already set to that.
     # then, if policy is not off, bounce the interface using command
-    prev_hostname = get_hostname()
-
-    set_hostname(hostname, hostname_command)
-
-    msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
-           (prev_hostname, hostname, policy, interface))
-
-    if util.is_false(policy):
-        LOG.debug("pubhname: policy false, skipping [%s]", msg)
-        return
-
-    if prev_hostname == hostname and policy != "force":
-        LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
-        return
+    command = cfg['command']
+    interface = cfg['interface']
+    policy = cfg['policy']
 
+    msg = ("hostname=%s policy=%s interface=%s" %
+           (hostname, policy, interface))
     env = os.environ.copy()
     env['interface'] = interface
     env['hostname'] = hostname
@@ -358,7 +355,12 @@ def apply_hostname_bounce(hostname, policy, interface, command,
         command = BOUNCE_COMMAND
 
     LOG.debug("pubhname: publishing hostname [%s]", msg)
-    perform_hostname_bounce(command, env)
+    shell = not isinstance(command, (list, tuple))
+    # capture=False, see comments in bug 1202758 and bug 1206164.
+    util.log_time(logfunc=LOG.debug, msg="publishing hostname",
+                  get_uptime=True, func=util.subp,
+                  kwargs={'args': command, 'shell': shell, 'capture': False,
+                          'env': env})
 
 
 def crtfile_to_pubkey(fname):
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 3adf9bdf..7e789853 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -439,6 +439,11 @@ class TestAzureBounce(TestCase):
         }
         return construct_valid_ovf_env(data=odata)
 
+    def test_disabled_bounce_does_not_change_hostname(self):
+        cfg = {'hostname_bounce': {'policy': 'off'}}
+        self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data()
+        self.assertEqual(0, self.set_hostname.call_count)
+
     @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
     def test_disabled_bounce_does_not_perform_bounce(
             self, perform_hostname_bounce):
@@ -446,6 +451,13 @@ class TestAzureBounce(TestCase):
         self._get_ds(self.get_ovf_env_with_dscfg('test-host', cfg)).get_data()
         self.assertEqual(0, perform_hostname_bounce.call_count)
 
+    def test_same_hostname_does_not_change_hostname(self):
+        host_name = 'unchanged-host-name'
+        self.get_hostname.return_value = host_name
+        cfg = {'hostname_bounce': {'policy': 'yes'}}
+        self._get_ds(self.get_ovf_env_with_dscfg(host_name, cfg)).get_data()
+        self.assertEqual(0, self.set_hostname.call_count)
+
     @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
     def test_unchanged_hostname_does_not_perform_bounce(
             self, perform_hostname_bounce):
@@ -480,6 +492,25 @@ class TestAzureBounce(TestCase):
             self.get_ovf_env_with_dscfg(expected_hostname, {})).get_data()
         self.assertEqual(1, perform_hostname_bounce.call_count)
 
+    def test_different_hostnames_sets_hostname_back(self):
+        initial_host_name = 'default-host-name'
+        self.get_hostname.return_value = initial_host_name
+        self._get_ds(
+            self.get_ovf_env_with_dscfg('some-host-name', {})).get_data()
+        self.assertEqual(initial_host_name,
+                         self.set_hostname.call_args_list[-1][0][0])
+
+    @mock.patch('cloudinit.sources.DataSourceAzure.perform_hostname_bounce')
+    def test_failure_in_bounce_still_resets_host_name(
+            self, perform_hostname_bounce):
+        perform_hostname_bounce.side_effect = Exception
+        initial_host_name = 'default-host-name'
+        self.get_hostname.return_value = initial_host_name
+        self._get_ds(
+            self.get_ovf_env_with_dscfg('some-host-name', {})).get_data()
+        self.assertEqual(initial_host_name,
+                         self.set_hostname.call_args_list[-1][0][0])
+
     def test_environment_correct_for_bounce_command(self):
         interface = 'int0'
         hostname = 'my-new-host'
-- 
cgit v1.2.3


From 57c3365ec8310ff09cafa4c0f3fbdfb48c787e18 Mon Sep 17 00:00:00 2001
From: brak <brak@brian2>
Date: Wed, 15 Apr 2015 11:18:50 -0400
Subject: CentOS 7 uses systemd.  RHEL distributions using systemd were not
 properly saving the previous-hostname data

---
 cloudinit/distros/rhel.py | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 7408989c..eec17c61 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -116,6 +116,7 @@ class Distro(distros.Distro):
         (dist, vers) = util.system_info()['dist'][:2]
         major = (int)(vers.split('.')[0])
         return ((dist.startswith('Red Hat Enterprise Linux') and major >= 7)
+                or (dist.startswith('CentOS Linux') and major >= 7)
                 or (dist.startswith('Fedora') and major >= 18))
 
     def apply_locale(self, locale, out_fn=None):
@@ -132,7 +133,11 @@ class Distro(distros.Distro):
         rhel_util.update_sysconfig_file(out_fn, locale_cfg)
 
     def _write_hostname(self, hostname, out_fn):
-        if self.uses_systemd():
+        # systemd will never update previous-hostname for us, so 
+        # we need to do it ourselves
+        if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
+            util.write_file(out_fn, hostname)
+        elif self.uses_systemd():
             util.subp(['hostnamectl', 'set-hostname', str(hostname)])
         else:
             host_cfg = {
@@ -155,7 +160,9 @@ class Distro(distros.Distro):
         return (host_fn, self._read_hostname(host_fn))
 
     def _read_hostname(self, filename, default=None):
-        if self.uses_systemd():
+        if self.uses_systemd() and filename.endswith('/previous-hostname'):
+            return util.load_file(filename).strip()  
+        elif self.uses_systemd():
             (out, _err) = util.subp(['hostname'])
             if len(out):
                 return out
-- 
cgit v1.2.3


From 96820355ea20fe655a7ebbb68ffb309bf234ab37 Mon Sep 17 00:00:00 2001
From: brak <brak@brian2>
Date: Wed, 15 Apr 2015 11:32:56 -0400
Subject: Don't overwrite the hostname if the user has changed it after we set
 it

---
 cloudinit/distros/__init__.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index ab874b45..c699a65a 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -208,6 +208,15 @@ class Distro(object):
                                   and sys_hostname != hostname):
             update_files.append(sys_fn)
 
+        # If something else has changed the hostname after we set it
+        # initially, we should not overwrite those changes (we should
+        # only be setting the hostname once per instance)
+        if (sys_hostname and prev_hostname and
+                sys_hostname != prev_hostname):
+            LOG.info("%s differs from %s, assuming user maintained hostname.",
+                       prev_hostname_fn, sys_fn)
+            return
+
         # Remove duplicates (incase the previous config filename)
         # is the same as the system config filename, don't bother
         # doing it twice
@@ -222,11 +231,6 @@ class Distro(object):
                 util.logexc(LOG, "Failed to write hostname %s to %s", hostname,
                             fn)
 
-        if (sys_hostname and prev_hostname and
-                sys_hostname != prev_hostname):
-            LOG.debug("%s differs from %s, assuming user maintained hostname.",
-                       prev_hostname_fn, sys_fn)
-
         # If the system hostname file name was provided set the
         # non-fqdn as the transient hostname.
         if sys_fn in update_files:
-- 
cgit v1.2.3


From dcd4b2b371059bd6249b4e43af371ee1162273e8 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 16 Apr 2015 16:41:06 -0400
Subject: pep8 fixes

---
 cloudinit/config/cc_snappy.py  | 4 ++--
 cloudinit/handlers/__init__.py | 6 +++---
 tests/unittests/test_data.py   | 5 +++--
 3 files changed, 8 insertions(+), 7 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 6a7ae09b..bfe76558 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -72,7 +72,7 @@ def parse_filename(fname):
     name = fname_noext.partition("_")[0]
     shortname = name.partition(".")[0]
     return(name, shortname, fname_noext)
-    
+
 
 def get_fs_package_ops(fspath):
     if not fspath:
@@ -98,7 +98,7 @@ def makeop(op, name, config=None, path=None, cfgfile=None):
 
 def get_package_config(configs, name):
     # load the package's config from the configs dict.
-    # prefer full-name entry (config-example.canonical) 
+    # prefer full-name entry (config-example.canonical)
     # over short name entry (config-example)
     if name in configs:
         return configs[name]
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index d62fcd19..52defe66 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -170,12 +170,12 @@ def _extract_first_or_bytes(blob, size):
             start = blob.split("\n", 1)[0]
         else:
             # We want to avoid decoding the whole blob (it might be huge)
-            # By taking 4*size bytes we have a guarantee to decode size utf8 chars
-            start = blob[:4*size].decode(errors='ignore').split("\n", 1)[0]
+            # By taking 4*size bytes we guarantee to decode size utf8 chars
+            start = blob[:4 * size].decode(errors='ignore').split("\n", 1)[0]
         if len(start) >= size:
             start = start[:size]
     except UnicodeDecodeError:
-        # Bytes array doesn't contain a text object -- return chunk of raw bytes
+        # Bytes array doesn't contain text so return chunk of raw bytes
         start = blob[0:size]
     return start
 
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index 4f24e2dd..b950c9a5 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -494,10 +494,10 @@ c: 4
             ])
 
     def test_mime_application_octet_stream(self):
-        """Mime message of type application/octet-stream is ignored but shows warning."""
+        """Mime type application/octet-stream is ignored but shows warning."""
         ci = stages.Init()
         message = MIMEBase("application", "octet-stream")
-        message.set_payload(b'\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc\xbf')
+        message.set_payload(b'\xbf\xe6\xb2\xc3\xd3\xba\x13\xa4\xd8\xa1\xcc')
         encoders.encode_base64(message)
         ci.datasource = FakeDataSource(message.as_string().encode())
 
@@ -511,6 +511,7 @@ c: 4
         mockobj.assert_called_once_with(
             ci.paths.get_ipath("cloud_config"), "", 0o600)
 
+
 class TestUDProcess(helpers.ResourceUsingTestCase):
 
     def test_bytes_in_userdata(self):
-- 
cgit v1.2.3


From 341a805fca9a06ce12e9f4bbbe15b3dded9eb6a4 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 16 Apr 2015 17:00:19 -0400
Subject: fix cloud-config-archive handling

handling of cloud-config-archive input would fail in fully_decoded_payload.
part.get_charset() would return a Charset object, and that object's
input_codec attribute is a string suitable for passing to decode.

This handles that correctly, and is more careful about binary data inside
input.

The test added verifies that cloud-config inside a cloud-config-archive
is handled correctly and also that binary data there is ignored without
exceptions raised.

LP: #1445143
---
 cloudinit/handlers/__init__.py |  5 ++++-
 cloudinit/user_data.py         |  9 +++++++--
 cloudinit/util.py              |  8 ++++++--
 tests/unittests/test_data.py   | 27 +++++++++++++++++++++++++++
 4 files changed, 44 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index 52defe66..53d5604a 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -263,7 +263,10 @@ def fixup_handler(mod, def_freq=PER_INSTANCE):
 
 
 def type_from_starts_with(payload, default=None):
-    payload_lc = payload.lower()
+    try:
+        payload_lc = util.decode_binary(payload).lower()
+    except UnicodeDecodeError:
+        return default
     payload_lc = payload_lc.lstrip()
     for text in INCLUSION_SRCH:
         if payload_lc.startswith(text):
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index eb3c7336..f7c5787c 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -49,6 +49,7 @@ INCLUDE_TYPES = ['text/x-include-url', 'text/x-include-once-url']
 ARCHIVE_TYPES = ["text/cloud-config-archive"]
 UNDEF_TYPE = "text/plain"
 ARCHIVE_UNDEF_TYPE = "text/cloud-config"
+ARCHIVE_UNDEF_BINARY_TYPE = "application/octet-stream"
 
 # This seems to hit most of the gzip possible content types.
 DECOMP_TYPES = [
@@ -265,11 +266,15 @@ class UserDataProcessor(object):
             content = ent.get('content', '')
             mtype = ent.get('type')
             if not mtype:
-                mtype = handlers.type_from_starts_with(content,
-                                                       ARCHIVE_UNDEF_TYPE)
+                default = ARCHIVE_UNDEF_TYPE
+                if isinstance(content, six.binary_type):
+                    default = ARCHIVE_UNDEF_BINARY_TYPE
+                mtype = handlers.type_from_starts_with(content, default)
 
             maintype, subtype = mtype.split('/', 1)
             if maintype == "text":
+                if isinstance(content, six.binary_type):
+                    content = content.decode()
                 msg = MIMEText(content, _subtype=subtype)
             else:
                 msg = MIMEBase(maintype, subtype)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 971c1c2d..cae57770 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -121,8 +121,12 @@ def fully_decoded_payload(part):
     if (six.PY3 and
             part.get_content_maintype() == 'text' and
             isinstance(cte_payload, bytes)):
-        charset = part.get_charset() or 'utf-8'
-        return cte_payload.decode(charset, errors='surrogateescape')
+        charset = part.get_charset()
+        if charset and charset.input_codec:
+            encoding = charset.input_codec
+        else:
+            encoding = 'utf-8'
+        return cte_payload.decode(encoding, errors='surrogateescape')
     return cte_payload
 
 
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index b950c9a5..1b15dafa 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -512,6 +512,33 @@ c: 4
             ci.paths.get_ipath("cloud_config"), "", 0o600)
 
 
+    def test_cloud_config_archive(self):
+        non_decodable = b'\x11\xc9\xb4gTH\xee\x12'
+        data = [{'content': '#cloud-config\npassword: gocubs\n'},
+                {'content': '#cloud-config\nlocale: chicago\n'},
+                {'content': non_decodable}]
+        message = b'#cloud-config-archive\n' + util.yaml_dumps(data).encode()
+
+        ci = stages.Init()
+        ci.datasource = FakeDataSource(message)
+
+        fs = {}
+
+        def fsstore(filename, content, mode=0o0644, omode="wb"):
+            fs[filename] = content
+
+        # consuming the user-data provided should write 'cloud_config' file
+        # which will have our yaml in it.
+        with mock.patch('cloudinit.util.write_file') as mockobj:
+            mockobj.side_effect = fsstore
+            ci.fetch()
+            ci.consume_data()
+
+        cfg = util.load_yaml(fs[ci.paths.get_ipath("cloud_config")])
+        self.assertEqual(cfg.get('password'), 'gocubs')
+        self.assertEqual(cfg.get('locale'), 'chicago')
+
+
 class TestUDProcess(helpers.ResourceUsingTestCase):
 
     def test_bytes_in_userdata(self):
-- 
cgit v1.2.3


From 844ebbee112143e85fb46b4b5ed649729f903d2c Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Mon, 20 Apr 2015 15:23:57 +0100
Subject: Refactor GCE metadata fetching to use a helper class.

---
 cloudinit/sources/DataSourceGCE.py | 69 ++++++++++++++++++++------------------
 1 file changed, 36 insertions(+), 33 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 608c07f1..255f5f45 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -30,6 +30,31 @@ BUILTIN_DS_CONFIG = {
 REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
 
 
+class GoogleMetadataFetcher(object):
+    headers = {'X-Google-Metadata-Request': True}
+
+    def __init__(self, metadata_address):
+        self.metadata_address = metadata_address
+
+    def get_value(self, path, is_text):
+        value = None
+        try:
+            resp = url_helper.readurl(url=self.metadata_address + path,
+                                      headers=self.headers)
+        except url_helper.UrlError as exc:
+            msg = "url %s raised exception %s"
+            LOG.debug(msg, path, exc)
+        else:
+            if resp.code == 200:
+                if is_text:
+                    value = util.decode_binary(resp.contents)
+                else:
+                    value = resp.contents
+            else:
+                LOG.debug("url %s returned code %s", path, resp.code)
+        return value
+
+
 class DataSourceGCE(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -50,9 +75,6 @@ class DataSourceGCE(sources.DataSource):
             return public_key
 
     def get_data(self):
-        # GCE metadata server requires a custom header since v1
-        headers = {'X-Google-Metadata-Request': True}
-
         # url_map: (our-key, path, required, is_text)
         url_map = [
             ('instance-id', 'instance/id', True, True),
@@ -69,40 +91,21 @@ class DataSourceGCE(sources.DataSource):
             LOG.debug("%s is not resolvable", self.metadata_address)
             return False
 
+        metadata_fetcher = GoogleMetadataFetcher(self.metadata_address)
         # iterate over url_map keys to get metadata items
         found = False
         for (mkey, path, required, is_text) in url_map:
-            try:
-                resp = url_helper.readurl(url=self.metadata_address + path,
-                                          headers=headers)
-                if resp.code == 200:
-                    found = True
-                    if is_text:
-                        self.metadata[mkey] = util.decode_binary(resp.contents)
-                    else:
-                        self.metadata[mkey] = resp.contents
+            value = metadata_fetcher.get_value(path, is_text)
+            if value:
+                found = True
+            if required and value is None:
+                msg = "required url %s returned nothing. not GCE"
+                if not found:
+                    LOG.debug(msg, path)
                 else:
-                    if required:
-                        msg = "required url %s returned code %s. not GCE"
-                        if not found:
-                            LOG.debug(msg, path, resp.code)
-                        else:
-                            LOG.warn(msg, path, resp.code)
-                        return False
-                    else:
-                        self.metadata[mkey] = None
-            except url_helper.UrlError as e:
-                if required:
-                    msg = "required url %s raised exception %s. not GCE"
-                    if not found:
-                        LOG.debug(msg, path, e)
-                    else:
-                        LOG.warn(msg, path, e)
-                    return False
-                msg = "Failed to get %s metadata item: %s."
-                LOG.debug(msg, path, e)
-
-                self.metadata[mkey] = None
+                    LOG.warn(msg, path)
+                return False
+            self.metadata[mkey] = value
 
         if self.metadata['public-keys']:
             lines = self.metadata['public-keys'].splitlines()
-- 
cgit v1.2.3


From 47eb1c4b52a2f5f4f8ea657918acd94209668bd7 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Mon, 20 Apr 2015 15:24:00 +0100
Subject: Rename found variable in GCE data source.

---
 cloudinit/sources/DataSourceGCE.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 255f5f45..9cf2f56e 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -93,14 +93,14 @@ class DataSourceGCE(sources.DataSource):
 
         metadata_fetcher = GoogleMetadataFetcher(self.metadata_address)
         # iterate over url_map keys to get metadata items
-        found = False
+        running_on_gce = False
         for (mkey, path, required, is_text) in url_map:
             value = metadata_fetcher.get_value(path, is_text)
             if value:
-                found = True
+                running_on_gce = True
             if required and value is None:
                 msg = "required url %s returned nothing. not GCE"
-                if not found:
+                if not running_on_gce:
                     LOG.debug(msg, path)
                 else:
                     LOG.warn(msg, path)
@@ -119,7 +119,7 @@ class DataSourceGCE(sources.DataSource):
             else:
                 LOG.warn('unknown user-data-encoding: %s, ignoring', encoding)
 
-        return found
+        return running_on_gce
 
     @property
     def launch_index(self):
-- 
cgit v1.2.3


From 6e84c05d2dc402de8cc4ae414af8657b97317218 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Mon, 20 Apr 2015 15:24:21 +0100
Subject: Support multiple metadata paths for metadata keys in GCE data source.

---
 cloudinit/sources/DataSourceGCE.py | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 9cf2f56e..1a133c28 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -77,12 +77,12 @@ class DataSourceGCE(sources.DataSource):
     def get_data(self):
         # url_map: (our-key, path, required, is_text)
         url_map = [
-            ('instance-id', 'instance/id', True, True),
-            ('availability-zone', 'instance/zone', True, True),
-            ('local-hostname', 'instance/hostname', True, True),
-            ('public-keys', 'project/attributes/sshKeys', False, True),
-            ('user-data', 'instance/attributes/user-data', False, False),
-            ('user-data-encoding', 'instance/attributes/user-data-encoding',
+            ('instance-id', ('instance/id',), True, True),
+            ('availability-zone', ('instance/zone',), True, True),
+            ('local-hostname', ('instance/hostname',), True, True),
+            ('public-keys', ('project/attributes/sshKeys',), False, True),
+            ('user-data', ('instance/attributes/user-data',), False, False),
+            ('user-data-encoding', ('instance/attributes/user-data-encoding',),
              False, True),
         ]
 
@@ -94,16 +94,20 @@ class DataSourceGCE(sources.DataSource):
         metadata_fetcher = GoogleMetadataFetcher(self.metadata_address)
         # iterate over url_map keys to get metadata items
         running_on_gce = False
-        for (mkey, path, required, is_text) in url_map:
-            value = metadata_fetcher.get_value(path, is_text)
+        for (mkey, paths, required, is_text) in url_map:
+            value = None
+            for path in paths:
+                new_value = metadata_fetcher.get_value(path, is_text)
+                if new_value is not None:
+                    value = new_value
             if value:
                 running_on_gce = True
             if required and value is None:
-                msg = "required url %s returned nothing. not GCE"
+                msg = "required key %s returned nothing. not GCE"
                 if not running_on_gce:
-                    LOG.debug(msg, path)
+                    LOG.debug(msg, mkey)
                 else:
-                    LOG.warn(msg, path)
+                    LOG.warn(msg, mkey)
                 return False
             self.metadata[mkey] = value
 
-- 
cgit v1.2.3


From 4fc65f02ae3fbf1a2062e6169ee39b5c5d5e23bc Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Mon, 20 Apr 2015 15:24:22 +0100
Subject: GCE instance-level SSH keys override project-level keys. (LP:
 #1403617)

---
 cloudinit/sources/DataSourceGCE.py          |  3 ++-
 tests/unittests/test_datasource/test_gce.py | 38 ++++++++++++++++++++++++++---
 2 files changed, 36 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 1a133c28..f4ed915d 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -80,7 +80,8 @@ class DataSourceGCE(sources.DataSource):
             ('instance-id', ('instance/id',), True, True),
             ('availability-zone', ('instance/zone',), True, True),
             ('local-hostname', ('instance/hostname',), True, True),
-            ('public-keys', ('project/attributes/sshKeys',), False, True),
+            ('public-keys', ('project/attributes/sshKeys',
+                             'instance/attributes/sshKeys'), False, True),
             ('user-data', ('instance/attributes/user-data',), False, False),
             ('user-data-encoding', ('instance/attributes/user-data-encoding',),
              False, True),
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 540a55d0..1fb100f7 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -113,10 +113,6 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
         self.assertEqual(GCE_META.get('instance/attributes/user-data'),
                          self.ds.get_userdata_raw())
 
-        # we expect a list of public ssh keys with user names stripped
-        self.assertEqual(['ssh-rsa AA2..+aRD0fyVw== root@server'],
-                         self.ds.get_public_ssh_keys())
-
     # test partial metadata (missing user-data in particular)
     @httpretty.activate
     def test_metadata_partial(self):
@@ -152,3 +148,37 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
                                    body=_new_request_callback(meta))
             self.assertEqual(False, self.ds.get_data())
             httpretty.reset()
+
+    @httpretty.activate
+    def test_project_level_ssh_keys_are_used(self):
+        httpretty.register_uri(httpretty.GET, MD_URL_RE,
+                               body=_new_request_callback())
+        self.ds.get_data()
+
+        # we expect a list of public ssh keys with user names stripped
+        self.assertEqual(['ssh-rsa AA2..+aRD0fyVw== root@server'],
+                         self.ds.get_public_ssh_keys())
+
+    @httpretty.activate
+    def test_instance_level_ssh_keys_are_used(self):
+        key_content = 'ssh-rsa JustAUser root@server'
+        meta = GCE_META.copy()
+        meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content)
+
+        httpretty.register_uri(httpretty.GET, MD_URL_RE,
+                               body=_new_request_callback(meta))
+        self.ds.get_data()
+
+        self.assertIn(key_content, self.ds.get_public_ssh_keys())
+
+    @httpretty.activate
+    def test_instance_level_keys_replace_project_level_keys(self):
+        key_content = 'ssh-rsa JustAUser root@server'
+        meta = GCE_META.copy()
+        meta['instance/attributes/sshKeys'] = 'user:{0}'.format(key_content)
+
+        httpretty.register_uri(httpretty.GET, MD_URL_RE,
+                               body=_new_request_callback(meta))
+        self.ds.get_data()
+
+        self.assertEqual([key_content], self.ds.get_public_ssh_keys())
-- 
cgit v1.2.3


From 96854d720d4bd356181acfa093744599a807ea8e Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 1 May 2015 05:38:56 -0400
Subject: fix 'Make pyflakes'

---
 Makefile                                                   | 2 +-
 cloudinit/config/cc_apt_pipelining.py                      | 2 +-
 cloudinit/config/cc_snappy.py                              | 2 --
 cloudinit/sources/DataSourceOpenNebula.py                  | 1 -
 tests/unittests/test_datasource/test_smartos.py            | 2 --
 tests/unittests/test_handler/test_handler_apt_configure.py | 1 -
 tests/unittests/test_handler/test_handler_snappy.py        | 5 -----
 tests/unittests/test_templating.py                         | 5 +----
 tools/hacking.py                                           | 2 +-
 tools/validate-yaml.py                                     | 3 +--
 10 files changed, 5 insertions(+), 20 deletions(-)

(limited to 'cloudinit')

diff --git a/Makefile b/Makefile
index 009257ca..bb0c5253 100644
--- a/Makefile
+++ b/Makefile
@@ -20,7 +20,7 @@ pep8:
 	@$(CWD)/tools/run-pep8 $(PY_FILES)
 
 pyflakes:
-	pyflakes $(PY_FILES)
+	@$(CWD)/tools/tox-venv py34 pyflakes $(PY_FILES)
 
 pip-requirements:
 	@echo "Installing cloud-init dependencies..."
diff --git a/cloudinit/config/cc_apt_pipelining.py b/cloudinit/config/cc_apt_pipelining.py
index e5629175..40c32c84 100644
--- a/cloudinit/config/cc_apt_pipelining.py
+++ b/cloudinit/config/cc_apt_pipelining.py
@@ -43,7 +43,7 @@ def handle(_name, cfg, _cloud, log, _args):
         write_apt_snippet("0", log, DEFAULT_FILE)
     elif apt_pipe_value_s in ("none", "unchanged", "os"):
         return
-    elif apt_pipe_value_s in [str(b) for b in xrange(0, 6)]:
+    elif apt_pipe_value_s in [str(b) for b in range(0, 6)]:
         write_apt_snippet(apt_pipe_value_s, log, DEFAULT_FILE)
     else:
         log.warn("Invalid option for apt_pipeling: %s", apt_pipe_value)
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index bfe76558..7aaec94a 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -42,12 +42,10 @@ Example config:
 """
 
 from cloudinit import log as logging
-from cloudinit import templater
 from cloudinit import util
 from cloudinit.settings import PER_INSTANCE
 
 import glob
-import six
 import tempfile
 import os
 
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index 61709c1b..ac2c3b45 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -24,7 +24,6 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import base64
 import os
 import pwd
 import re
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 28b41eaf..adee9019 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -36,8 +36,6 @@ from binascii import crc32
 import serial
 import six
 
-import six
-
 from cloudinit import helpers as c_helpers
 from cloudinit.sources import DataSourceSmartOS
 from cloudinit.util import b64e
diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py
index 02cad8b2..895728b3 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure.py
@@ -7,7 +7,6 @@ import os
 import re
 import shutil
 import tempfile
-import unittest
 
 
 class TestAptProxyConfig(TestCase):
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
index f3109bac..eceb14d9 100644
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -38,7 +38,6 @@ class TestInstallPackages(t_help.TestCase):
         if 'args' not in kwargs:
             kwargs['args'] = args[0]
         self.subp_called.append(kwargs)
-        snap_cmds = []
         args = kwargs['args']
         # here we basically parse the snappy command invoked
         # and append to snapcmds a list of (mode, pkg, config)
@@ -117,9 +116,6 @@ class TestInstallPackages(t_help.TestCase):
     def test_package_ops_common_filename(self):
         # fish package name from filename
         # package names likely look like: pkgname.namespace_version_arch.snap
-        fname = "xkcd-webserver.canonical_0.3.4_all.snap"
-        name = "xkcd-webserver.canonical"
-        shortname = "xkcd-webserver"
 
         # find filenames
         self.populate_tmp(
@@ -165,7 +161,6 @@ class TestInstallPackages(t_help.TestCase):
             'ubuntu-core': {'c1': 'c2'},
             'notinstalled.smoser': {'s1': 's2'},
         }
-        cfg = {'config-example-k1': 'config-example-k2'}
         ret = get_package_ops(
             packages=['config-example.canonical'], configs=cfgs,
             installed=['config-example.smoser', 'pkg1.canonical',
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index cf7c03b0..0c19a2c2 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -18,10 +18,6 @@
 
 from __future__ import print_function
 
-import sys
-import six
-import unittest
-
 from . import helpers as test_helpers
 import textwrap
 
@@ -30,6 +26,7 @@ from cloudinit import templater
 try:
     import Cheetah
     HAS_CHEETAH = True
+    Cheetah  # make pyflakes happy, as Cheetah is not used here
 except ImportError:
     HAS_CHEETAH = False
 
diff --git a/tools/hacking.py b/tools/hacking.py
index e7797564..3175df38 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -128,7 +128,7 @@ def cloud_docstring_multiline_end(physical_line):
     """
     pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE])  # start
     if (pos != -1 and len(physical_line) == pos):
-        print physical_line
+        print(physical_line)
         if (physical_line[pos + 3] == ' '):
             return (pos, "N403: multi line docstring end on new line")
 
diff --git a/tools/validate-yaml.py b/tools/validate-yaml.py
index eda59cb8..6e164590 100755
--- a/tools/validate-yaml.py
+++ b/tools/validate-yaml.py
@@ -4,7 +4,6 @@
 """
 
 import sys
-
 import yaml
 
 
@@ -17,7 +16,7 @@ if __name__ == "__main__":
             yaml.safe_load(fh.read())
             fh.close()
             sys.stdout.write(" - ok\n")
-        except Exception, e:
+        except Exception as e:
             sys.stdout.write(" - bad (%s)\n" % (e))
             bads += 1
     if bads > 0:
-- 
cgit v1.2.3


From 6ddf7beb112f016be7ebd6fe296de6eaaf3aa9ca Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Thu, 7 May 2015 14:46:47 +0100
Subject: Implement basic replacement for walinuxagent in Azure data source.

---
 cloudinit/sources/DataSourceAzure.py          | 292 +++++++++++++++++++----
 tests/unittests/test_datasource/test_azure.py | 331 ++++++++++++++++++++++++++
 2 files changed, 574 insertions(+), 49 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index a19d9ca2..bd3c742b 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -22,8 +22,14 @@ import crypt
 import fnmatch
 import os
 import os.path
+import re
+import socket
+import struct
+import tempfile
 import time
+from contextlib import contextmanager
 from xml.dom import minidom
+from xml.etree import ElementTree
 
 from cloudinit import log as logging
 from cloudinit.settings import PER_ALWAYS
@@ -34,13 +40,11 @@ LOG = logging.getLogger(__name__)
 
 DS_NAME = 'Azure'
 DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
-AGENT_START = ['service', 'walinuxagent', 'start']
 BOUNCE_COMMAND = ['sh', '-xc',
     "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]
 DATA_DIR_CLEAN_LIST = ['SharedConfig.xml']
 
 BUILTIN_DS_CONFIG = {
-    'agent_command': AGENT_START,
     'data_dir': "/var/lib/waagent",
     'set_hostname': True,
     'hostname_bounce': {
@@ -66,6 +70,231 @@ BUILTIN_CLOUD_CONFIG = {
 DS_CFG_PATH = ['datasource', DS_NAME]
 DEF_EPHEMERAL_LABEL = 'Temporary Storage'
 
+REPORT_READY_XML_TEMPLATE = """\
+<?xml version=\"1.0\" encoding=\"utf-8\"?>
+<Health xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\">
+  <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>
+  <Container>
+    <ContainerId>{container_id}</ContainerId>
+    <RoleInstanceList>
+      <Role>
+        <InstanceId>{instance_id}</InstanceId>
+        <Health>
+          <State>Ready</State>
+        </Health>
+      </Role>
+    </RoleInstanceList>
+  </Container>
+</Health>"""
+
+
+@contextmanager
+def cd(newdir):
+    prevdir = os.getcwd()
+    os.chdir(os.path.expanduser(newdir))
+    try:
+        yield
+    finally:
+        os.chdir(prevdir)
+
+
+class AzureEndpointHttpClient(object):
+
+    headers = {
+        'x-ms-agent-name': 'WALinuxAgent',
+        'x-ms-version': '2012-11-30',
+    }
+
+    def __init__(self, certificate):
+        self.extra_secure_headers = {
+            "x-ms-cipher-name": "DES_EDE3_CBC",
+            "x-ms-guest-agent-public-x509-cert": certificate,
+        }
+
+    def get(self, url, secure=False):
+        headers = self.headers
+        if secure:
+            headers = self.headers.copy()
+            headers.update(self.extra_secure_headers)
+        return util.read_file_or_url(url, headers=headers)
+
+    def post(self, url, data=None, extra_headers=None):
+        headers = self.headers
+        if extra_headers is not None:
+            headers = self.headers.copy()
+            headers.update(extra_headers)
+        return util.read_file_or_url(url, data=data, headers=headers)
+
+
+def find_endpoint():
+    content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
+    value = None
+    for line in content.splitlines():
+        if 'unknown-245' in line:
+            value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
+    if value is None:
+        raise Exception('No endpoint found in DHCP config.')
+    if ':' in value:
+        hex_string = ''
+        for hex_pair in value.split(':'):
+            if len(hex_pair) == 1:
+                hex_pair = '0' + hex_pair
+            hex_string += hex_pair
+        value = struct.pack('>L', int(hex_string.replace(':', ''), 16))
+    else:
+        value = value.encode('utf-8')
+    return socket.inet_ntoa(value)
+
+
+class GoalState(object):
+
+    def __init__(self, xml, http_client):
+        self.http_client = http_client
+        self.root = ElementTree.fromstring(xml)
+
+    def _text_from_xpath(self, xpath):
+        element = self.root.find(xpath)
+        if element is not None:
+            return element.text
+        return None
+
+    @property
+    def container_id(self):
+        return self._text_from_xpath('./Container/ContainerId')
+
+    @property
+    def incarnation(self):
+        return self._text_from_xpath('./Incarnation')
+
+    @property
+    def instance_id(self):
+        return self._text_from_xpath(
+            './Container/RoleInstanceList/RoleInstance/InstanceId')
+
+    @property
+    def shared_config_xml(self):
+        url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance'
+                                    '/Configuration/SharedConfig')
+        return self.http_client.get(url).contents
+
+    @property
+    def certificates_xml(self):
+        url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance'
+                                    '/Configuration/Certificates')
+        if url is not None:
+            return self.http_client.get(url, secure=True).contents
+        return None
+
+
+class OpenSSLManager(object):
+
+    certificate_names = {
+        'private_key': 'TransportPrivate.pem',
+        'certificate': 'TransportCert.pem',
+    }
+
+    def __init__(self):
+        self.tmpdir = tempfile.TemporaryDirectory()
+        self.certificate = None
+        self.generate_certificate()
+
+    def generate_certificate(self):
+        if self.certificate is not None:
+            return
+        with cd(self.tmpdir.name):
+            util.subp([
+                'openssl', 'req', '-x509', '-nodes', '-subj',
+                '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
+                '-keyout', self.certificate_names['private_key'],
+                '-out', self.certificate_names['certificate'],
+            ])
+            certificate = ''
+            for line in open(self.certificate_names['certificate']):
+                if "CERTIFICATE" not in line:
+                    certificate += line.rstrip()
+            self.certificate = certificate
+
+    def parse_certificates(self, certificates_xml):
+        tag = ElementTree.fromstring(certificates_xml).find(
+            './/Data')
+        certificates_content = tag.text
+        lines = [
+            b'MIME-Version: 1.0',
+            b'Content-Disposition: attachment; filename="Certificates.p7m"',
+            b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
+            b'Content-Transfer-Encoding: base64',
+            b'',
+            certificates_content.encode('utf-8'),
+        ]
+        with cd(self.tmpdir.name):
+            with open('Certificates.p7m', 'wb') as f:
+                f.write(b'\n'.join(lines))
+            out, _ = util.subp(
+                'openssl cms -decrypt -in Certificates.p7m -inkey'
+                ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
+                ' -password pass:'.format(**self.certificate_names),
+                shell=True)
+        private_keys, certificates = [], []
+        current = []
+        for line in out.splitlines():
+            current.append(line)
+            if re.match(r'[-]+END .*?KEY[-]+$', line):
+                private_keys.append('\n'.join(current))
+                current = []
+            elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
+                certificates.append('\n'.join(current))
+                current = []
+        keys = []
+        for certificate in certificates:
+            with cd(self.tmpdir.name):
+                public_key, _ = util.subp(
+                    'openssl x509 -noout -pubkey |'
+                    'ssh-keygen -i -m PKCS8 -f /dev/stdin',
+                    data=certificate,
+                    shell=True)
+            keys.append(public_key)
+        return keys
+
+
+class WALinuxAgentShim(object):
+
+    def __init__(self):
+        self.endpoint = find_endpoint()
+        self.goal_state = None
+        self.openssl_manager = OpenSSLManager()
+        self.http_client = AzureEndpointHttpClient(
+            self.openssl_manager.certificate)
+        self.values = {}
+
+    def register_with_azure_and_fetch_data(self):
+        LOG.info('Registering with Azure...')
+        for i in range(10):
+            try:
+                response = self.http_client.get(
+                    'http://{}/machine/?comp=goalstate'.format(self.endpoint))
+            except Exception:
+                time.sleep(i + 1)
+            else:
+                break
+        self.goal_state = GoalState(response.contents, self.http_client)
+        self.public_keys = []
+        if self.goal_state.certificates_xml is not None:
+            self.public_keys = self.openssl_manager.parse_certificates(
+                self.goal_state.certificates_xml)
+        self._report_ready()
+
+    def _report_ready(self):
+        document = REPORT_READY_XML_TEMPLATE.format(
+            incarnation=self.goal_state.incarnation,
+            container_id=self.goal_state.container_id,
+            instance_id=self.goal_state.instance_id,
+        )
+        self.http_client.post(
+            "http://{}/machine?comp=health".format(self.endpoint),
+            data=document,
+            extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
+        )
+
 
 def get_hostname(hostname_command='hostname'):
     return util.subp(hostname_command, capture=True)[0].strip()
@@ -185,53 +414,17 @@ class DataSourceAzureNet(sources.DataSource):
         # the directory to be protected.
         write_files(ddir, files, dirmode=0o700)
 
-        temp_hostname = self.metadata.get('local-hostname')
-        hostname_command = mycfg['hostname_bounce']['hostname_command']
-        with temporary_hostname(temp_hostname, mycfg,
-                                hostname_command=hostname_command) \
-                as previous_hostname:
-            if (previous_hostname is not None
-                    and util.is_true(mycfg.get('set_hostname'))):
-                cfg = mycfg['hostname_bounce']
-                try:
-                    perform_hostname_bounce(hostname=temp_hostname,
-                                            cfg=cfg,
-                                            prev_hostname=previous_hostname)
-                except Exception as e:
-                    LOG.warn("Failed publishing hostname: %s", e)
-                    util.logexc(LOG, "handling set_hostname failed")
+        shim = WALinuxAgentShim()
+        shim.register_with_azure_and_fetch_data()
 
-            try:
-                invoke_agent(mycfg['agent_command'])
-            except util.ProcessExecutionError:
-                # claim the datasource even if the command failed
-                util.logexc(LOG, "agent command '%s' failed.",
-                            mycfg['agent_command'])
-
-            shcfgxml = os.path.join(ddir, "SharedConfig.xml")
-            wait_for = [shcfgxml]
-
-            fp_files = []
-            for pk in self.cfg.get('_pubkeys', []):
-                bname = str(pk['fingerprint'] + ".crt")
-                fp_files += [os.path.join(ddir, bname)]
-
-            missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
-                                    func=wait_for_files,
-                                    args=(wait_for + fp_files,))
-        if len(missing):
-            LOG.warn("Did not find files, but going on: %s", missing)
-
-        if shcfgxml in missing:
-            LOG.warn("SharedConfig.xml missing, using static instance-id")
-        else:
-            try:
-                self.metadata['instance-id'] = iid_from_shared_config(shcfgxml)
-            except ValueError as e:
-                LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)
+        try:
+            self.metadata['instance-id'] = iid_from_shared_config_content(
+                shim.goal_state.shared_config_xml)
+        except ValueError as e:
+            LOG.warn(
+                "failed to get instance id in %s: %s", shim.shared_config, e)
 
-        pubkeys = pubkeys_from_crt_files(fp_files)
-        self.metadata['public-keys'] = pubkeys
+        self.metadata['public-keys'] = shim.public_keys
 
         found_ephemeral = find_ephemeral_disk()
         if found_ephemeral:
@@ -363,10 +556,11 @@ def perform_hostname_bounce(hostname, cfg, prev_hostname):
                           'env': env})
 
 
-def crtfile_to_pubkey(fname):
+def crtfile_to_pubkey(fname, data=None):
     pipeline = ('openssl x509 -noout -pubkey < "$0" |'
                 'ssh-keygen -i -m PKCS8 -f /dev/stdin')
-    (out, _err) = util.subp(['sh', '-c', pipeline, fname], capture=True)
+    (out, _err) = util.subp(['sh', '-c', pipeline, fname],
+                            capture=True, data=data)
     return out.rstrip()
 
 
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 7e789853..dc7f2663 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -15,11 +15,48 @@ except ImportError:
 import crypt
 import os
 import stat
+import struct
 import yaml
 import shutil
 import tempfile
 import unittest
 
+from cloudinit import url_helper
+
+
+GOAL_STATE_TEMPLATE = """\
+<?xml version="1.0" encoding="utf-8"?>
+<GoalState xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="goalstate10.xsd">
+  <Version>2012-11-30</Version>
+  <Incarnation>{incarnation}</Incarnation>
+  <Machine>
+    <ExpectedState>Started</ExpectedState>
+    <StopRolesDeadlineHint>300000</StopRolesDeadlineHint>
+    <LBProbePorts>
+      <Port>16001</Port>
+    </LBProbePorts>
+    <ExpectHealthReport>FALSE</ExpectHealthReport>
+  </Machine>
+  <Container>
+    <ContainerId>{container_id}</ContainerId>
+    <RoleInstanceList>
+      <RoleInstance>
+        <InstanceId>{instance_id}</InstanceId>
+        <State>Started</State>
+        <Configuration>
+          <HostingEnvironmentConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=hostingEnvironmentConfig&amp;incarnation=1</HostingEnvironmentConfig>
+          <SharedConfig>{shared_config_url}</SharedConfig>
+          <ExtensionsConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=extensionsConfig&amp;incarnation=1</ExtensionsConfig>
+          <FullConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=fullConfig&amp;incarnation=1</FullConfig>
+          <Certificates>{certificates_url}</Certificates>
+          <ConfigName>68ce47b32ea94952be7b20951c383628.0.68ce47b32ea94952be7b20951c383628.0.utl-trusty--292258.1.xml</ConfigName>
+        </Configuration>
+      </RoleInstance>
+    </RoleInstanceList>
+  </Container>
+</GoalState>
+"""
+
 
 def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
     if data is None:
@@ -579,3 +616,297 @@ class TestReadAzureSharedConfig(unittest.TestCase):
             </SharedConfig>"""
         ret = DataSourceAzure.iid_from_shared_config_content(xml)
         self.assertEqual("MY_INSTANCE_ID", ret)
+
+
+class TestFindEndpoint(TestCase):
+
+    def setUp(self):
+        super(TestFindEndpoint, self).setUp()
+        patches = ExitStack()
+        self.addCleanup(patches.close)
+
+        self.load_file = patches.enter_context(
+            mock.patch.object(DataSourceAzure.util, 'load_file'))
+
+    def test_missing_file(self):
+        self.load_file.side_effect = IOError
+        self.assertRaises(IOError, DataSourceAzure.find_endpoint)
+
+    def test_missing_special_azure_line(self):
+        self.load_file.return_value = ''
+        self.assertRaises(Exception, DataSourceAzure.find_endpoint)
+
+    def _build_lease_content(self, ip_address, use_hex=True):
+        ip_address_repr = ':'.join(
+            [hex(int(part)).replace('0x', '')
+             for part in ip_address.split('.')])
+        if not use_hex:
+            ip_address_repr = struct.pack(
+                '>L', int(ip_address_repr.replace(':', ''), 16))
+            ip_address_repr = '"{0}"'.format(ip_address_repr.decode('utf-8'))
+        return '\n'.join([
+            'lease {',
+            ' interface "eth0";',
+            ' option unknown-245 {0};'.format(ip_address_repr),
+            '}'])
+
+    def test_hex_string(self):
+        ip_address = '98.76.54.32'
+        file_content = self._build_lease_content(ip_address)
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_address, DataSourceAzure.find_endpoint())
+
+    def test_hex_string_with_single_character_part(self):
+        ip_address = '4.3.2.1'
+        file_content = self._build_lease_content(ip_address)
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_address, DataSourceAzure.find_endpoint())
+
+    def test_packed_string(self):
+        ip_address = '98.76.54.32'
+        file_content = self._build_lease_content(ip_address, use_hex=False)
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_address, DataSourceAzure.find_endpoint())
+
+    def test_latest_lease_used(self):
+        ip_addresses = ['4.3.2.1', '98.76.54.32']
+        file_content = '\n'.join([self._build_lease_content(ip_address)
+                                  for ip_address in ip_addresses])
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_addresses[-1], DataSourceAzure.find_endpoint())
+
+
+class TestGoalStateParsing(TestCase):
+
+    default_parameters = {
+        'incarnation': 1,
+        'container_id': 'MyContainerId',
+        'instance_id': 'MyInstanceId',
+        'shared_config_url': 'MySharedConfigUrl',
+        'certificates_url': 'MyCertificatesUrl',
+    }
+
+    def _get_goal_state(self, http_client=None, **kwargs):
+        if http_client is None:
+            http_client = mock.MagicMock()
+        parameters = self.default_parameters.copy()
+        parameters.update(kwargs)
+        xml = GOAL_STATE_TEMPLATE.format(**parameters)
+        if parameters['certificates_url'] is None:
+            new_xml_lines = []
+            for line in xml.splitlines():
+                if 'Certificates' in line:
+                    continue
+                new_xml_lines.append(line)
+            xml = '\n'.join(new_xml_lines)
+        return DataSourceAzure.GoalState(xml, http_client)
+
+    def test_incarnation_parsed_correctly(self):
+        incarnation = '123'
+        goal_state = self._get_goal_state(incarnation=incarnation)
+        self.assertEqual(incarnation, goal_state.incarnation)
+
+    def test_container_id_parsed_correctly(self):
+        container_id = 'TestContainerId'
+        goal_state = self._get_goal_state(container_id=container_id)
+        self.assertEqual(container_id, goal_state.container_id)
+
+    def test_instance_id_parsed_correctly(self):
+        instance_id = 'TestInstanceId'
+        goal_state = self._get_goal_state(instance_id=instance_id)
+        self.assertEqual(instance_id, goal_state.instance_id)
+
+    def test_shared_config_xml_parsed_and_fetched_correctly(self):
+        http_client = mock.MagicMock()
+        shared_config_url = 'TestSharedConfigUrl'
+        goal_state = self._get_goal_state(
+            http_client=http_client, shared_config_url=shared_config_url)
+        shared_config_xml = goal_state.shared_config_xml
+        self.assertEqual(1, http_client.get.call_count)
+        self.assertEqual(shared_config_url, http_client.get.call_args[0][0])
+        self.assertEqual(http_client.get.return_value.contents,
+                         shared_config_xml)
+
+    def test_certificates_xml_parsed_and_fetched_correctly(self):
+        http_client = mock.MagicMock()
+        certificates_url = 'TestSharedConfigUrl'
+        goal_state = self._get_goal_state(
+            http_client=http_client, certificates_url=certificates_url)
+        certificates_xml = goal_state.certificates_xml
+        self.assertEqual(1, http_client.get.call_count)
+        self.assertEqual(certificates_url, http_client.get.call_args[0][0])
+        self.assertTrue(http_client.get.call_args[1].get('secure', False))
+        self.assertEqual(http_client.get.return_value.contents,
+                         certificates_xml)
+
+    def test_missing_certificates_skips_http_get(self):
+        http_client = mock.MagicMock()
+        goal_state = self._get_goal_state(
+            http_client=http_client, certificates_url=None)
+        certificates_xml = goal_state.certificates_xml
+        self.assertEqual(0, http_client.get.call_count)
+        self.assertIsNone(certificates_xml)
+
+
+class TestAzureEndpointHttpClient(TestCase):
+
+    regular_headers = {
+        'x-ms-agent-name': 'WALinuxAgent',
+        'x-ms-version': '2012-11-30',
+    }
+
+    def setUp(self):
+        super(TestAzureEndpointHttpClient, self).setUp()
+        patches = ExitStack()
+        self.addCleanup(patches.close)
+
+        self.read_file_or_url = patches.enter_context(
+            mock.patch.object(DataSourceAzure.util, 'read_file_or_url'))
+
+    def test_non_secure_get(self):
+        client = DataSourceAzure.AzureEndpointHttpClient(mock.MagicMock())
+        url = 'MyTestUrl'
+        response = client.get(url, secure=False)
+        self.assertEqual(1, self.read_file_or_url.call_count)
+        self.assertEqual(self.read_file_or_url.return_value, response)
+        self.assertEqual(mock.call(url, headers=self.regular_headers),
+                         self.read_file_or_url.call_args)
+
+    def test_secure_get(self):
+        url = 'MyTestUrl'
+        certificate = mock.MagicMock()
+        expected_headers = self.regular_headers.copy()
+        expected_headers.update({
+            "x-ms-cipher-name": "DES_EDE3_CBC",
+            "x-ms-guest-agent-public-x509-cert": certificate,
+        })
+        client = DataSourceAzure.AzureEndpointHttpClient(certificate)
+        response = client.get(url, secure=True)
+        self.assertEqual(1, self.read_file_or_url.call_count)
+        self.assertEqual(self.read_file_or_url.return_value, response)
+        self.assertEqual(mock.call(url, headers=expected_headers),
+                         self.read_file_or_url.call_args)
+
+    def test_post(self):
+        data = mock.MagicMock()
+        url = 'MyTestUrl'
+        client = DataSourceAzure.AzureEndpointHttpClient(mock.MagicMock())
+        response = client.post(url, data=data)
+        self.assertEqual(1, self.read_file_or_url.call_count)
+        self.assertEqual(self.read_file_or_url.return_value, response)
+        self.assertEqual(
+            mock.call(url, data=data, headers=self.regular_headers),
+            self.read_file_or_url.call_args)
+
+    def test_post_with_extra_headers(self):
+        url = 'MyTestUrl'
+        client = DataSourceAzure.AzureEndpointHttpClient(mock.MagicMock())
+        extra_headers = {'test': 'header'}
+        client.post(url, extra_headers=extra_headers)
+        self.assertEqual(1, self.read_file_or_url.call_count)
+        expected_headers = self.regular_headers.copy()
+        expected_headers.update(extra_headers)
+        self.assertEqual(
+            mock.call(mock.ANY, data=mock.ANY, headers=expected_headers),
+            self.read_file_or_url.call_args)
+
+
+class TestOpenSSLManager(TestCase):
+
+    def setUp(self):
+        super(TestOpenSSLManager, self).setUp()
+        patches = ExitStack()
+        self.addCleanup(patches.close)
+
+        self.subp = patches.enter_context(
+            mock.patch.object(DataSourceAzure.util, 'subp'))
+
+    @mock.patch.object(DataSourceAzure, 'cd', mock.MagicMock())
+    @mock.patch.object(DataSourceAzure.tempfile, 'TemporaryDirectory')
+    def test_openssl_manager_creates_a_tmpdir(self, TemporaryDirectory):
+        manager = DataSourceAzure.OpenSSLManager()
+        self.assertEqual(TemporaryDirectory.return_value, manager.tmpdir)
+
+    @mock.patch('builtins.open')
+    def test_generate_certificate_uses_tmpdir(self, open):
+        subp_directory = {}
+
+        def capture_directory(*args, **kwargs):
+            subp_directory['path'] = os.getcwd()
+
+        self.subp.side_effect = capture_directory
+        manager = DataSourceAzure.OpenSSLManager()
+        self.assertEqual(manager.tmpdir.name, subp_directory['path'])
+
+
+class TestWALinuxAgentShim(TestCase):
+
+    def setUp(self):
+        super(TestWALinuxAgentShim, self).setUp()
+        patches = ExitStack()
+        self.addCleanup(patches.close)
+
+        self.AzureEndpointHttpClient = patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'AzureEndpointHttpClient'))
+        self.find_endpoint = patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'find_endpoint'))
+        self.GoalState = patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'GoalState'))
+        self.OpenSSLManager = patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'OpenSSLManager'))
+
+    def test_http_client_uses_certificate(self):
+        shim = DataSourceAzure.WALinuxAgentShim()
+        self.assertEqual(
+            [mock.call(self.OpenSSLManager.return_value.certificate)],
+            self.AzureEndpointHttpClient.call_args_list)
+        self.assertEqual(self.AzureEndpointHttpClient.return_value,
+                         shim.http_client)
+
+    def test_correct_url_used_for_goalstate(self):
+        self.find_endpoint.return_value = 'test_endpoint'
+        shim = DataSourceAzure.WALinuxAgentShim()
+        shim.register_with_azure_and_fetch_data()
+        get = self.AzureEndpointHttpClient.return_value.get
+        self.assertEqual(
+            [mock.call('http://test_endpoint/machine/?comp=goalstate')],
+            get.call_args_list)
+        self.assertEqual(
+            [mock.call(get.return_value.contents, shim.http_client)],
+            self.GoalState.call_args_list)
+
+    def test_certificates_used_to_determine_public_keys(self):
+        shim = DataSourceAzure.WALinuxAgentShim()
+        shim.register_with_azure_and_fetch_data()
+        self.assertEqual(
+            [mock.call(self.GoalState.return_value.certificates_xml)],
+            self.OpenSSLManager.return_value.parse_certificates.call_args_list)
+        self.assertEqual(
+            self.OpenSSLManager.return_value.parse_certificates.return_value,
+            shim.public_keys)
+
+    def test_absent_certificates_produces_empty_public_keys(self):
+        self.GoalState.return_value.certificates_xml = None
+        shim = DataSourceAzure.WALinuxAgentShim()
+        shim.register_with_azure_and_fetch_data()
+        self.assertEqual([], shim.public_keys)
+
+    def test_correct_url_used_for_report_ready(self):
+        self.find_endpoint.return_value = 'test_endpoint'
+        shim = DataSourceAzure.WALinuxAgentShim()
+        shim.register_with_azure_and_fetch_data()
+        expected_url = 'http://test_endpoint/machine?comp=health'
+        self.assertEqual(
+            [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
+            shim.http_client.post.call_args_list)
+
+    def test_goal_state_values_used_for_report_ready(self):
+        self.GoalState.return_value.incarnation = 'TestIncarnation'
+        self.GoalState.return_value.container_id = 'TestContainerId'
+        self.GoalState.return_value.instance_id = 'TestInstanceId'
+        shim = DataSourceAzure.WALinuxAgentShim()
+        shim.register_with_azure_and_fetch_data()
+        posted_document = shim.http_client.post.call_args[1]['data']
+        self.assertIn('TestIncarnation', posted_document)
+        self.assertIn('TestContainerId', posted_document)
+        self.assertIn('TestInstanceId', posted_document)
-- 
cgit v1.2.3


From 2edfd791b29df3271bdc3aff40d60336ddd636ed Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 8 May 2015 12:58:18 +0100
Subject: Return a dict of data from WALinuxAgentShim, rather than accessing
 attributes.

---
 cloudinit/sources/DataSourceAzure.py          | 46 +++++++++++++++------------
 tests/unittests/test_datasource/test_azure.py | 29 ++++++++++++++---
 2 files changed, 49 insertions(+), 26 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index bd3c742b..b93357d5 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -260,7 +260,6 @@ class WALinuxAgentShim(object):
 
     def __init__(self):
         self.endpoint = find_endpoint()
-        self.goal_state = None
         self.openssl_manager = OpenSSLManager()
         self.http_client = AzureEndpointHttpClient(
             self.openssl_manager.certificate)
@@ -276,18 +275,24 @@ class WALinuxAgentShim(object):
                 time.sleep(i + 1)
             else:
                 break
-        self.goal_state = GoalState(response.contents, self.http_client)
-        self.public_keys = []
-        if self.goal_state.certificates_xml is not None:
-            self.public_keys = self.openssl_manager.parse_certificates(
-                self.goal_state.certificates_xml)
-        self._report_ready()
-
-    def _report_ready(self):
+        goal_state = GoalState(response.contents, self.http_client)
+        public_keys = []
+        if goal_state.certificates_xml is not None:
+            public_keys = self.openssl_manager.parse_certificates(
+                goal_state.certificates_xml)
+        data = {
+            'instance-id': iid_from_shared_config_content(
+                goal_state.shared_config_xml),
+            'public-keys': public_keys,
+        }
+        self._report_ready(goal_state)
+        return data
+
+    def _report_ready(self, goal_state):
         document = REPORT_READY_XML_TEMPLATE.format(
-            incarnation=self.goal_state.incarnation,
-            container_id=self.goal_state.container_id,
-            instance_id=self.goal_state.instance_id,
+            incarnation=goal_state.incarnation,
+            container_id=goal_state.container_id,
+            instance_id=goal_state.instance_id,
         )
         self.http_client.post(
             "http://{}/machine?comp=health".format(self.endpoint),
@@ -414,17 +419,16 @@ class DataSourceAzureNet(sources.DataSource):
         # the directory to be protected.
         write_files(ddir, files, dirmode=0o700)
 
-        shim = WALinuxAgentShim()
-        shim.register_with_azure_and_fetch_data()
-
         try:
-            self.metadata['instance-id'] = iid_from_shared_config_content(
-                shim.goal_state.shared_config_xml)
-        except ValueError as e:
-            LOG.warn(
-                "failed to get instance id in %s: %s", shim.shared_config, e)
+            shim = WALinuxAgentShim()
+            data = shim.register_with_azure_and_fetch_data()
+        except Exception as exc:
+            LOG.info("Error communicating with Azure fabric; assume we aren't"
+                     " on Azure.", exc_info=True)
+            return False
 
-        self.metadata['public-keys'] = shim.public_keys
+        self.metadata['instance-id'] = data['instance-id']
+        self.metadata['public-keys'] = data['public-keys']
 
         found_ephemeral = find_ephemeral_disk()
         if found_ephemeral:
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index dc7f2663..fd5b24f8 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -160,6 +160,12 @@ class TestAzureDataSource(TestCase):
         mod = DataSourceAzure
         mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
 
+        fake_shim = mock.MagicMock()
+        fake_shim().register_with_azure_and_fetch_data.return_value = {
+            'instance-id': 'i-my-azure-id',
+            'public-keys': [],
+        }
+
         self.apply_patches([
             (mod, 'list_possible_azure_ds_devs', dsdevs),
             (mod, 'invoke_agent', _invoke_agent),
@@ -169,7 +175,8 @@ class TestAzureDataSource(TestCase):
             (mod, 'perform_hostname_bounce', mock.MagicMock()),
             (mod, 'get_hostname', mock.MagicMock()),
             (mod, 'set_hostname', mock.MagicMock()),
-            ])
+            (mod, 'WALinuxAgentShim', fake_shim),
+        ])
 
         dsrc = mod.DataSourceAzureNet(
             data.get('sys_cfg', {}), distro=None, paths=self.paths)
@@ -852,6 +859,9 @@ class TestWALinuxAgentShim(TestCase):
             mock.patch.object(DataSourceAzure, 'find_endpoint'))
         self.GoalState = patches.enter_context(
             mock.patch.object(DataSourceAzure, 'GoalState'))
+        self.iid_from_shared_config_content = patches.enter_context(
+            mock.patch.object(DataSourceAzure,
+                              'iid_from_shared_config_content'))
         self.OpenSSLManager = patches.enter_context(
             mock.patch.object(DataSourceAzure, 'OpenSSLManager'))
 
@@ -877,19 +887,28 @@ class TestWALinuxAgentShim(TestCase):
 
     def test_certificates_used_to_determine_public_keys(self):
         shim = DataSourceAzure.WALinuxAgentShim()
-        shim.register_with_azure_and_fetch_data()
+        data = shim.register_with_azure_and_fetch_data()
         self.assertEqual(
             [mock.call(self.GoalState.return_value.certificates_xml)],
             self.OpenSSLManager.return_value.parse_certificates.call_args_list)
         self.assertEqual(
             self.OpenSSLManager.return_value.parse_certificates.return_value,
-            shim.public_keys)
+            data['public-keys'])
 
     def test_absent_certificates_produces_empty_public_keys(self):
         self.GoalState.return_value.certificates_xml = None
         shim = DataSourceAzure.WALinuxAgentShim()
-        shim.register_with_azure_and_fetch_data()
-        self.assertEqual([], shim.public_keys)
+        data = shim.register_with_azure_and_fetch_data()
+        self.assertEqual([], data['public-keys'])
+
+    def test_instance_id_returned_in_data(self):
+        shim = DataSourceAzure.WALinuxAgentShim()
+        data = shim.register_with_azure_and_fetch_data()
+        self.assertEqual(
+            [mock.call(self.GoalState.return_value.shared_config_xml)],
+            self.iid_from_shared_config_content.call_args_list)
+        self.assertEqual(self.iid_from_shared_config_content.return_value,
+                         data['instance-id'])
 
     def test_correct_url_used_for_report_ready(self):
         self.find_endpoint.return_value = 'test_endpoint'
-- 
cgit v1.2.3


From 28e9e693942d758fb5bdc952c32542c77e16f23a Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 8 May 2015 12:58:20 +0100
Subject: Add logging.

---
 cloudinit/sources/DataSourceAzure.py | 13 ++++++++++++-
 1 file changed, 12 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index b93357d5..deffd9b2 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -127,6 +127,7 @@ class AzureEndpointHttpClient(object):
 
 
 def find_endpoint():
+    LOG.debug('Finding Azure endpoint...')
     content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
     value = None
     for line in content.splitlines():
@@ -143,7 +144,9 @@ def find_endpoint():
         value = struct.pack('>L', int(hex_string.replace(':', ''), 16))
     else:
         value = value.encode('utf-8')
-    return socket.inet_ntoa(value)
+    endpoint_ip_address = socket.inet_ntoa(value)
+    LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
+    return endpoint_ip_address
 
 
 class GoalState(object):
@@ -199,7 +202,9 @@ class OpenSSLManager(object):
         self.generate_certificate()
 
     def generate_certificate(self):
+        LOG.debug('Generating certificate for communication with fabric...')
         if self.certificate is not None:
+            LOG.debug('Certificate already generated.')
             return
         with cd(self.tmpdir.name):
             util.subp([
@@ -213,6 +218,7 @@ class OpenSSLManager(object):
                 if "CERTIFICATE" not in line:
                     certificate += line.rstrip()
             self.certificate = certificate
+        LOG.debug('New certificate generated.')
 
     def parse_certificates(self, certificates_xml):
         tag = ElementTree.fromstring(certificates_xml).find(
@@ -259,6 +265,7 @@ class OpenSSLManager(object):
 class WALinuxAgentShim(object):
 
     def __init__(self):
+        LOG.debug('WALinuxAgentShim instantiated...')
         self.endpoint = find_endpoint()
         self.openssl_manager = OpenSSLManager()
         self.http_client = AzureEndpointHttpClient(
@@ -275,9 +282,11 @@ class WALinuxAgentShim(object):
                 time.sleep(i + 1)
             else:
                 break
+        LOG.debug('Successfully fetched GoalState XML.')
         goal_state = GoalState(response.contents, self.http_client)
         public_keys = []
         if goal_state.certificates_xml is not None:
+            LOG.debug('Certificate XML found; parsing out public keys.')
             public_keys = self.openssl_manager.parse_certificates(
                 goal_state.certificates_xml)
         data = {
@@ -289,6 +298,7 @@ class WALinuxAgentShim(object):
         return data
 
     def _report_ready(self, goal_state):
+        LOG.debug('Reporting ready to Azure fabric.')
         document = REPORT_READY_XML_TEMPLATE.format(
             incarnation=goal_state.incarnation,
             container_id=goal_state.container_id,
@@ -299,6 +309,7 @@ class WALinuxAgentShim(object):
             data=document,
             extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
         )
+        LOG.info('Reported ready to Azure fabric.')
 
 
 def get_hostname(hostname_command='hostname'):
-- 
cgit v1.2.3


From 4a2b6ef37578b13d7240dc1447bbb715b8a0a077 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 8 May 2015 12:58:20 +0100
Subject: Cache certificate response to save on communication with fabric.

---
 cloudinit/sources/DataSourceAzure.py | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index deffd9b2..c783732d 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -154,6 +154,7 @@ class GoalState(object):
     def __init__(self, xml, http_client):
         self.http_client = http_client
         self.root = ElementTree.fromstring(xml)
+        self._certificates_xml = None
 
     def _text_from_xpath(self, xpath):
         element = self.root.find(xpath)
@@ -182,11 +183,14 @@ class GoalState(object):
 
     @property
     def certificates_xml(self):
-        url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance'
-                                    '/Configuration/Certificates')
-        if url is not None:
-            return self.http_client.get(url, secure=True).contents
-        return None
+        if self._certificates_xml is None:
+            url = self._text_from_xpath(
+                './Container/RoleInstanceList/RoleInstance'
+                '/Configuration/Certificates')
+            if url is not None:
+                self._certificates_xml = self.http_client.get(
+                    url, secure=True).contents
+        return self._certificates_xml
 
 
 class OpenSSLManager(object):
-- 
cgit v1.2.3


From 7ca682408f857fcfd04bfc026ea6c697c1fd4b86 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 8 May 2015 12:59:57 +0100
Subject: Make find_endpoint a staticmethod to clean up top-level namespace.

---
 cloudinit/sources/DataSourceAzure.py          | 84 ++++++++++++++-------------
 tests/unittests/test_datasource/test_azure.py | 21 ++++---
 2 files changed, 57 insertions(+), 48 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index c783732d..ba4afa5f 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -70,22 +70,6 @@ BUILTIN_CLOUD_CONFIG = {
 DS_CFG_PATH = ['datasource', DS_NAME]
 DEF_EPHEMERAL_LABEL = 'Temporary Storage'
 
-REPORT_READY_XML_TEMPLATE = """\
-<?xml version=\"1.0\" encoding=\"utf-8\"?>
-<Health xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\">
-  <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>
-  <Container>
-    <ContainerId>{container_id}</ContainerId>
-    <RoleInstanceList>
-      <Role>
-        <InstanceId>{instance_id}</InstanceId>
-        <Health>
-          <State>Ready</State>
-        </Health>
-      </Role>
-    </RoleInstanceList>
-  </Container>
-</Health>"""
 
 
 @contextmanager
@@ -126,29 +110,6 @@ class AzureEndpointHttpClient(object):
         return util.read_file_or_url(url, data=data, headers=headers)
 
 
-def find_endpoint():
-    LOG.debug('Finding Azure endpoint...')
-    content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
-    value = None
-    for line in content.splitlines():
-        if 'unknown-245' in line:
-            value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
-    if value is None:
-        raise Exception('No endpoint found in DHCP config.')
-    if ':' in value:
-        hex_string = ''
-        for hex_pair in value.split(':'):
-            if len(hex_pair) == 1:
-                hex_pair = '0' + hex_pair
-            hex_string += hex_pair
-        value = struct.pack('>L', int(hex_string.replace(':', ''), 16))
-    else:
-        value = value.encode('utf-8')
-    endpoint_ip_address = socket.inet_ntoa(value)
-    LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
-    return endpoint_ip_address
-
-
 class GoalState(object):
 
     def __init__(self, xml, http_client):
@@ -268,14 +229,55 @@ class OpenSSLManager(object):
 
 class WALinuxAgentShim(object):
 
+    REPORT_READY_XML_TEMPLATE = '\n'.join([
+        '<?xml version="1.0" encoding="utf-8"?>',
+        '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
+        ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
+        '  <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
+        '  <Container>',
+        '    <ContainerId>{container_id}</ContainerId>',
+        '    <RoleInstanceList>',
+        '      <Role>',
+        '        <InstanceId>{instance_id}</InstanceId>',
+        '        <Health>',
+        '          <State>Ready</State>',
+        '        </Health>',
+        '      </Role>',
+        '    </RoleInstanceList>',
+        '  </Container>',
+        '</Health>'])
+
     def __init__(self):
         LOG.debug('WALinuxAgentShim instantiated...')
-        self.endpoint = find_endpoint()
+        self.endpoint = self.find_endpoint()
         self.openssl_manager = OpenSSLManager()
         self.http_client = AzureEndpointHttpClient(
             self.openssl_manager.certificate)
         self.values = {}
 
+    @staticmethod
+    def find_endpoint():
+        LOG.debug('Finding Azure endpoint...')
+        content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
+        value = None
+        for line in content.splitlines():
+            if 'unknown-245' in line:
+                value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
+        if value is None:
+            raise Exception('No endpoint found in DHCP config.')
+        if ':' in value:
+            hex_string = ''
+            for hex_pair in value.split(':'):
+                if len(hex_pair) == 1:
+                    hex_pair = '0' + hex_pair
+                hex_string += hex_pair
+            value = struct.pack('>L', int(hex_string.replace(':', ''), 16))
+        else:
+            value = value.encode('utf-8')
+        endpoint_ip_address = socket.inet_ntoa(value)
+        LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
+        return endpoint_ip_address
+
     def register_with_azure_and_fetch_data(self):
         LOG.info('Registering with Azure...')
         for i in range(10):
@@ -303,7 +305,7 @@ class WALinuxAgentShim(object):
 
     def _report_ready(self, goal_state):
         LOG.debug('Reporting ready to Azure fabric.')
-        document = REPORT_READY_XML_TEMPLATE.format(
+        document = self.REPORT_READY_XML_TEMPLATE.format(
             incarnation=goal_state.incarnation,
             container_id=goal_state.container_id,
             instance_id=goal_state.instance_id,
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index fd5b24f8..28703029 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -637,11 +637,13 @@ class TestFindEndpoint(TestCase):
 
     def test_missing_file(self):
         self.load_file.side_effect = IOError
-        self.assertRaises(IOError, DataSourceAzure.find_endpoint)
+        self.assertRaises(IOError,
+                          DataSourceAzure.WALinuxAgentShim.find_endpoint)
 
     def test_missing_special_azure_line(self):
         self.load_file.return_value = ''
-        self.assertRaises(Exception, DataSourceAzure.find_endpoint)
+        self.assertRaises(Exception,
+                          DataSourceAzure.WALinuxAgentShim.find_endpoint)
 
     def _build_lease_content(self, ip_address, use_hex=True):
         ip_address_repr = ':'.join(
@@ -661,26 +663,30 @@ class TestFindEndpoint(TestCase):
         ip_address = '98.76.54.32'
         file_content = self._build_lease_content(ip_address)
         self.load_file.return_value = file_content
-        self.assertEqual(ip_address, DataSourceAzure.find_endpoint())
+        self.assertEqual(ip_address,
+                         DataSourceAzure.WALinuxAgentShim.find_endpoint())
 
     def test_hex_string_with_single_character_part(self):
         ip_address = '4.3.2.1'
         file_content = self._build_lease_content(ip_address)
         self.load_file.return_value = file_content
-        self.assertEqual(ip_address, DataSourceAzure.find_endpoint())
+        self.assertEqual(ip_address,
+                         DataSourceAzure.WALinuxAgentShim.find_endpoint())
 
     def test_packed_string(self):
         ip_address = '98.76.54.32'
         file_content = self._build_lease_content(ip_address, use_hex=False)
         self.load_file.return_value = file_content
-        self.assertEqual(ip_address, DataSourceAzure.find_endpoint())
+        self.assertEqual(ip_address,
+                         DataSourceAzure.WALinuxAgentShim.find_endpoint())
 
     def test_latest_lease_used(self):
         ip_addresses = ['4.3.2.1', '98.76.54.32']
         file_content = '\n'.join([self._build_lease_content(ip_address)
                                   for ip_address in ip_addresses])
         self.load_file.return_value = file_content
-        self.assertEqual(ip_addresses[-1], DataSourceAzure.find_endpoint())
+        self.assertEqual(ip_addresses[-1],
+                         DataSourceAzure.WALinuxAgentShim.find_endpoint())
 
 
 class TestGoalStateParsing(TestCase):
@@ -856,7 +862,8 @@ class TestWALinuxAgentShim(TestCase):
         self.AzureEndpointHttpClient = patches.enter_context(
             mock.patch.object(DataSourceAzure, 'AzureEndpointHttpClient'))
         self.find_endpoint = patches.enter_context(
-            mock.patch.object(DataSourceAzure, 'find_endpoint'))
+            mock.patch.object(
+                DataSourceAzure.WALinuxAgentShim, 'find_endpoint'))
         self.GoalState = patches.enter_context(
             mock.patch.object(DataSourceAzure, 'GoalState'))
         self.iid_from_shared_config_content = patches.enter_context(
-- 
cgit v1.2.3


From 917f1792e3f0fe2ae9411530217a1892d9bc6d1c Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 8 May 2015 13:00:06 +0100
Subject: Remove unused import.

---
 cloudinit/sources/DataSourceAzure.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index ba4afa5f..c2dc6b4c 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -17,7 +17,6 @@
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import base64
-import contextlib
 import crypt
 import fnmatch
 import os
-- 
cgit v1.2.3


From b9f26689e8b3bb7a3486771c6362107232a7dcf4 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 8 May 2015 13:16:42 +0100
Subject: Split WALinuxAgentShim code out to separate file.

---
 cloudinit/sources/DataSourceAzure.py               | 271 +--------------
 cloudinit/sources/helpers/azure.py                 | 273 +++++++++++++++
 tests/unittests/test_datasource/test_azure.py      | 364 --------------------
 .../unittests/test_datasource/test_azure_helper.py | 377 +++++++++++++++++++++
 4 files changed, 653 insertions(+), 632 deletions(-)
 create mode 100644 cloudinit/sources/helpers/azure.py
 create mode 100644 tests/unittests/test_datasource/test_azure_helper.py

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index c2dc6b4c..5e147950 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -17,23 +17,19 @@
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 import base64
+import contextlib
 import crypt
 import fnmatch
 import os
 import os.path
-import re
-import socket
-import struct
-import tempfile
-import time
-from contextlib import contextmanager
 from xml.dom import minidom
-from xml.etree import ElementTree
 
 from cloudinit import log as logging
 from cloudinit.settings import PER_ALWAYS
 from cloudinit import sources
 from cloudinit import util
+from cloudinit.sources.helpers.azure import (
+    iid_from_shared_config_content, WALinuxAgentShim)
 
 LOG = logging.getLogger(__name__)
 
@@ -70,253 +66,6 @@ DS_CFG_PATH = ['datasource', DS_NAME]
 DEF_EPHEMERAL_LABEL = 'Temporary Storage'
 
 
-
-@contextmanager
-def cd(newdir):
-    prevdir = os.getcwd()
-    os.chdir(os.path.expanduser(newdir))
-    try:
-        yield
-    finally:
-        os.chdir(prevdir)
-
-
-class AzureEndpointHttpClient(object):
-
-    headers = {
-        'x-ms-agent-name': 'WALinuxAgent',
-        'x-ms-version': '2012-11-30',
-    }
-
-    def __init__(self, certificate):
-        self.extra_secure_headers = {
-            "x-ms-cipher-name": "DES_EDE3_CBC",
-            "x-ms-guest-agent-public-x509-cert": certificate,
-        }
-
-    def get(self, url, secure=False):
-        headers = self.headers
-        if secure:
-            headers = self.headers.copy()
-            headers.update(self.extra_secure_headers)
-        return util.read_file_or_url(url, headers=headers)
-
-    def post(self, url, data=None, extra_headers=None):
-        headers = self.headers
-        if extra_headers is not None:
-            headers = self.headers.copy()
-            headers.update(extra_headers)
-        return util.read_file_or_url(url, data=data, headers=headers)
-
-
-class GoalState(object):
-
-    def __init__(self, xml, http_client):
-        self.http_client = http_client
-        self.root = ElementTree.fromstring(xml)
-        self._certificates_xml = None
-
-    def _text_from_xpath(self, xpath):
-        element = self.root.find(xpath)
-        if element is not None:
-            return element.text
-        return None
-
-    @property
-    def container_id(self):
-        return self._text_from_xpath('./Container/ContainerId')
-
-    @property
-    def incarnation(self):
-        return self._text_from_xpath('./Incarnation')
-
-    @property
-    def instance_id(self):
-        return self._text_from_xpath(
-            './Container/RoleInstanceList/RoleInstance/InstanceId')
-
-    @property
-    def shared_config_xml(self):
-        url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance'
-                                    '/Configuration/SharedConfig')
-        return self.http_client.get(url).contents
-
-    @property
-    def certificates_xml(self):
-        if self._certificates_xml is None:
-            url = self._text_from_xpath(
-                './Container/RoleInstanceList/RoleInstance'
-                '/Configuration/Certificates')
-            if url is not None:
-                self._certificates_xml = self.http_client.get(
-                    url, secure=True).contents
-        return self._certificates_xml
-
-
-class OpenSSLManager(object):
-
-    certificate_names = {
-        'private_key': 'TransportPrivate.pem',
-        'certificate': 'TransportCert.pem',
-    }
-
-    def __init__(self):
-        self.tmpdir = tempfile.TemporaryDirectory()
-        self.certificate = None
-        self.generate_certificate()
-
-    def generate_certificate(self):
-        LOG.debug('Generating certificate for communication with fabric...')
-        if self.certificate is not None:
-            LOG.debug('Certificate already generated.')
-            return
-        with cd(self.tmpdir.name):
-            util.subp([
-                'openssl', 'req', '-x509', '-nodes', '-subj',
-                '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
-                '-keyout', self.certificate_names['private_key'],
-                '-out', self.certificate_names['certificate'],
-            ])
-            certificate = ''
-            for line in open(self.certificate_names['certificate']):
-                if "CERTIFICATE" not in line:
-                    certificate += line.rstrip()
-            self.certificate = certificate
-        LOG.debug('New certificate generated.')
-
-    def parse_certificates(self, certificates_xml):
-        tag = ElementTree.fromstring(certificates_xml).find(
-            './/Data')
-        certificates_content = tag.text
-        lines = [
-            b'MIME-Version: 1.0',
-            b'Content-Disposition: attachment; filename="Certificates.p7m"',
-            b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
-            b'Content-Transfer-Encoding: base64',
-            b'',
-            certificates_content.encode('utf-8'),
-        ]
-        with cd(self.tmpdir.name):
-            with open('Certificates.p7m', 'wb') as f:
-                f.write(b'\n'.join(lines))
-            out, _ = util.subp(
-                'openssl cms -decrypt -in Certificates.p7m -inkey'
-                ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
-                ' -password pass:'.format(**self.certificate_names),
-                shell=True)
-        private_keys, certificates = [], []
-        current = []
-        for line in out.splitlines():
-            current.append(line)
-            if re.match(r'[-]+END .*?KEY[-]+$', line):
-                private_keys.append('\n'.join(current))
-                current = []
-            elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
-                certificates.append('\n'.join(current))
-                current = []
-        keys = []
-        for certificate in certificates:
-            with cd(self.tmpdir.name):
-                public_key, _ = util.subp(
-                    'openssl x509 -noout -pubkey |'
-                    'ssh-keygen -i -m PKCS8 -f /dev/stdin',
-                    data=certificate,
-                    shell=True)
-            keys.append(public_key)
-        return keys
-
-
-class WALinuxAgentShim(object):
-
-    REPORT_READY_XML_TEMPLATE = '\n'.join([
-        '<?xml version="1.0" encoding="utf-8"?>',
-        '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
-        ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
-        '  <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
-        '  <Container>',
-        '    <ContainerId>{container_id}</ContainerId>',
-        '    <RoleInstanceList>',
-        '      <Role>',
-        '        <InstanceId>{instance_id}</InstanceId>',
-        '        <Health>',
-        '          <State>Ready</State>',
-        '        </Health>',
-        '      </Role>',
-        '    </RoleInstanceList>',
-        '  </Container>',
-        '</Health>'])
-
-    def __init__(self):
-        LOG.debug('WALinuxAgentShim instantiated...')
-        self.endpoint = self.find_endpoint()
-        self.openssl_manager = OpenSSLManager()
-        self.http_client = AzureEndpointHttpClient(
-            self.openssl_manager.certificate)
-        self.values = {}
-
-    @staticmethod
-    def find_endpoint():
-        LOG.debug('Finding Azure endpoint...')
-        content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
-        value = None
-        for line in content.splitlines():
-            if 'unknown-245' in line:
-                value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
-        if value is None:
-            raise Exception('No endpoint found in DHCP config.')
-        if ':' in value:
-            hex_string = ''
-            for hex_pair in value.split(':'):
-                if len(hex_pair) == 1:
-                    hex_pair = '0' + hex_pair
-                hex_string += hex_pair
-            value = struct.pack('>L', int(hex_string.replace(':', ''), 16))
-        else:
-            value = value.encode('utf-8')
-        endpoint_ip_address = socket.inet_ntoa(value)
-        LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
-        return endpoint_ip_address
-
-    def register_with_azure_and_fetch_data(self):
-        LOG.info('Registering with Azure...')
-        for i in range(10):
-            try:
-                response = self.http_client.get(
-                    'http://{}/machine/?comp=goalstate'.format(self.endpoint))
-            except Exception:
-                time.sleep(i + 1)
-            else:
-                break
-        LOG.debug('Successfully fetched GoalState XML.')
-        goal_state = GoalState(response.contents, self.http_client)
-        public_keys = []
-        if goal_state.certificates_xml is not None:
-            LOG.debug('Certificate XML found; parsing out public keys.')
-            public_keys = self.openssl_manager.parse_certificates(
-                goal_state.certificates_xml)
-        data = {
-            'instance-id': iid_from_shared_config_content(
-                goal_state.shared_config_xml),
-            'public-keys': public_keys,
-        }
-        self._report_ready(goal_state)
-        return data
-
-    def _report_ready(self, goal_state):
-        LOG.debug('Reporting ready to Azure fabric.')
-        document = self.REPORT_READY_XML_TEMPLATE.format(
-            incarnation=goal_state.incarnation,
-            container_id=goal_state.container_id,
-            instance_id=goal_state.instance_id,
-        )
-        self.http_client.post(
-            "http://{}/machine?comp=health".format(self.endpoint),
-            data=document,
-            extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
-        )
-        LOG.info('Reported ready to Azure fabric.')
-
-
 def get_hostname(hostname_command='hostname'):
     return util.subp(hostname_command, capture=True)[0].strip()
 
@@ -690,20 +439,6 @@ def load_azure_ovf_pubkeys(sshnode):
     return found
 
 
-def single_node_at_path(node, pathlist):
-    curnode = node
-    for tok in pathlist:
-        results = find_child(curnode, lambda n: n.localName == tok)
-        if len(results) == 0:
-            raise ValueError("missing %s token in %s" % (tok, str(pathlist)))
-        if len(results) > 1:
-            raise ValueError("found %s nodes of type %s looking for %s" %
-                             (len(results), tok, str(pathlist)))
-        curnode = results[0]
-
-    return curnode
-
-
 def read_azure_ovf(contents):
     try:
         dom = minidom.parseString(contents)
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
new file mode 100644
index 00000000..60f116e0
--- /dev/null
+++ b/cloudinit/sources/helpers/azure.py
@@ -0,0 +1,273 @@
+import logging
+import os
+import re
+import socket
+import struct
+import tempfile
+import time
+from contextlib import contextmanager
+from xml.etree import ElementTree
+
+from cloudinit import util
+
+
+LOG = logging.getLogger(__name__)
+
+
+@contextmanager
+def cd(newdir):
+    prevdir = os.getcwd()
+    os.chdir(os.path.expanduser(newdir))
+    try:
+        yield
+    finally:
+        os.chdir(prevdir)
+
+
+class AzureEndpointHttpClient(object):
+
+    headers = {
+        'x-ms-agent-name': 'WALinuxAgent',
+        'x-ms-version': '2012-11-30',
+    }
+
+    def __init__(self, certificate):
+        self.extra_secure_headers = {
+            "x-ms-cipher-name": "DES_EDE3_CBC",
+            "x-ms-guest-agent-public-x509-cert": certificate,
+        }
+
+    def get(self, url, secure=False):
+        headers = self.headers
+        if secure:
+            headers = self.headers.copy()
+            headers.update(self.extra_secure_headers)
+        return util.read_file_or_url(url, headers=headers)
+
+    def post(self, url, data=None, extra_headers=None):
+        headers = self.headers
+        if extra_headers is not None:
+            headers = self.headers.copy()
+            headers.update(extra_headers)
+        return util.read_file_or_url(url, data=data, headers=headers)
+
+
+class GoalState(object):
+
+    def __init__(self, xml, http_client):
+        self.http_client = http_client
+        self.root = ElementTree.fromstring(xml)
+        self._certificates_xml = None
+
+    def _text_from_xpath(self, xpath):
+        element = self.root.find(xpath)
+        if element is not None:
+            return element.text
+        return None
+
+    @property
+    def container_id(self):
+        return self._text_from_xpath('./Container/ContainerId')
+
+    @property
+    def incarnation(self):
+        return self._text_from_xpath('./Incarnation')
+
+    @property
+    def instance_id(self):
+        return self._text_from_xpath(
+            './Container/RoleInstanceList/RoleInstance/InstanceId')
+
+    @property
+    def shared_config_xml(self):
+        url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance'
+                                    '/Configuration/SharedConfig')
+        return self.http_client.get(url).contents
+
+    @property
+    def certificates_xml(self):
+        if self._certificates_xml is None:
+            url = self._text_from_xpath(
+                './Container/RoleInstanceList/RoleInstance'
+                '/Configuration/Certificates')
+            if url is not None:
+                self._certificates_xml = self.http_client.get(
+                    url, secure=True).contents
+        return self._certificates_xml
+
+
+class OpenSSLManager(object):
+
+    certificate_names = {
+        'private_key': 'TransportPrivate.pem',
+        'certificate': 'TransportCert.pem',
+    }
+
+    def __init__(self):
+        self.tmpdir = tempfile.TemporaryDirectory()
+        self.certificate = None
+        self.generate_certificate()
+
+    def generate_certificate(self):
+        LOG.debug('Generating certificate for communication with fabric...')
+        if self.certificate is not None:
+            LOG.debug('Certificate already generated.')
+            return
+        with cd(self.tmpdir.name):
+            util.subp([
+                'openssl', 'req', '-x509', '-nodes', '-subj',
+                '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
+                '-keyout', self.certificate_names['private_key'],
+                '-out', self.certificate_names['certificate'],
+            ])
+            certificate = ''
+            with open(self.certificate_names['certificate']) as cert_file:
+                certificate = ''.join(line.rstrip() for line in cert_file
+                                      if "CERTIFICATE" not in line)
+            self.certificate = certificate
+        LOG.debug('New certificate generated.')
+
+    def parse_certificates(self, certificates_xml):
+        tag = ElementTree.fromstring(certificates_xml).find(
+            './/Data')
+        certificates_content = tag.text
+        lines = [
+            b'MIME-Version: 1.0',
+            b'Content-Disposition: attachment; filename="Certificates.p7m"',
+            b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
+            b'Content-Transfer-Encoding: base64',
+            b'',
+            certificates_content.encode('utf-8'),
+        ]
+        with cd(self.tmpdir.name):
+            with open('Certificates.p7m', 'wb') as f:
+                f.write(b'\n'.join(lines))
+            out, _ = util.subp(
+                'openssl cms -decrypt -in Certificates.p7m -inkey'
+                ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
+                ' -password pass:'.format(**self.certificate_names),
+                shell=True)
+        private_keys, certificates = [], []
+        current = []
+        for line in out.splitlines():
+            current.append(line)
+            if re.match(r'[-]+END .*?KEY[-]+$', line):
+                private_keys.append('\n'.join(current))
+                current = []
+            elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
+                certificates.append('\n'.join(current))
+                current = []
+        keys = []
+        for certificate in certificates:
+            with cd(self.tmpdir.name):
+                public_key, _ = util.subp(
+                    'openssl x509 -noout -pubkey |'
+                    'ssh-keygen -i -m PKCS8 -f /dev/stdin',
+                    data=certificate,
+                    shell=True)
+            keys.append(public_key)
+        return keys
+
+
+def iid_from_shared_config_content(content):
+    """
+    find INSTANCE_ID in:
+    <?xml version="1.0" encoding="utf-8"?>
+    <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
+    <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0">
+        <Service name="..." guid="{00000000-0000-0000-0000-000000000000}"/>
+    """
+    root = ElementTree.fromstring(content)
+    depnode = root.find('Deployment')
+    return depnode.get('name')
+
+
+class WALinuxAgentShim(object):
+
+    REPORT_READY_XML_TEMPLATE = '\n'.join([
+        '<?xml version="1.0" encoding="utf-8"?>',
+        '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
+        ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
+        '  <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
+        '  <Container>',
+        '    <ContainerId>{container_id}</ContainerId>',
+        '    <RoleInstanceList>',
+        '      <Role>',
+        '        <InstanceId>{instance_id}</InstanceId>',
+        '        <Health>',
+        '          <State>Ready</State>',
+        '        </Health>',
+        '      </Role>',
+        '    </RoleInstanceList>',
+        '  </Container>',
+        '</Health>'])
+
+    def __init__(self):
+        LOG.debug('WALinuxAgentShim instantiated...')
+        self.endpoint = self.find_endpoint()
+        self.openssl_manager = OpenSSLManager()
+        self.http_client = AzureEndpointHttpClient(
+            self.openssl_manager.certificate)
+        self.values = {}
+
+    @staticmethod
+    def find_endpoint():
+        LOG.debug('Finding Azure endpoint...')
+        content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
+        value = None
+        for line in content.splitlines():
+            if 'unknown-245' in line:
+                value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
+        if value is None:
+            raise Exception('No endpoint found in DHCP config.')
+        if ':' in value:
+            hex_string = ''
+            for hex_pair in value.split(':'):
+                if len(hex_pair) == 1:
+                    hex_pair = '0' + hex_pair
+                hex_string += hex_pair
+            value = struct.pack('>L', int(hex_string, 16))
+        else:
+            value = value.encode('utf-8')
+        endpoint_ip_address = socket.inet_ntoa(value)
+        LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
+        return endpoint_ip_address
+
+    def register_with_azure_and_fetch_data(self):
+        LOG.info('Registering with Azure...')
+        for i in range(10):
+            try:
+                response = self.http_client.get(
+                    'http://{}/machine/?comp=goalstate'.format(self.endpoint))
+                break
+            except Exception:
+                time.sleep(i + 1)
+        else:
+            raise RuntimeError('failed to fetch GoalState from Azure endpoint')
+        goal_state = GoalState(response.contents, self.http_client)
+        public_keys = []
+        if goal_state.certificates_xml is not None:
+            LOG.debug('Certificate XML found; parsing out public keys.')
+            public_keys = self.openssl_manager.parse_certificates(
+                goal_state.certificates_xml)
+        data = {
+            'instance-id': iid_from_shared_config_content(
+                goal_state.shared_config_xml),
+            'public-keys': public_keys,
+        }
+        self._report_ready(goal_state)
+        return data
+
+    def _report_ready(self, goal_state):
+        LOG.debug('Reporting ready to Azure fabric.')
+        document = self.REPORT_READY_XML_TEMPLATE.format(
+            incarnation=goal_state.incarnation,
+            container_id=goal_state.container_id,
+            instance_id=goal_state.instance_id,
+        )
+        self.http_client.post(
+            "http://{}/machine?comp=health".format(self.endpoint),
+            data=document,
+            extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
+        )
+        LOG.info('Reported ready to Azure fabric.')
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 28703029..ee7109e1 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -15,47 +15,9 @@ except ImportError:
 import crypt
 import os
 import stat
-import struct
 import yaml
 import shutil
 import tempfile
-import unittest
-
-from cloudinit import url_helper
-
-
-GOAL_STATE_TEMPLATE = """\
-<?xml version="1.0" encoding="utf-8"?>
-<GoalState xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="goalstate10.xsd">
-  <Version>2012-11-30</Version>
-  <Incarnation>{incarnation}</Incarnation>
-  <Machine>
-    <ExpectedState>Started</ExpectedState>
-    <StopRolesDeadlineHint>300000</StopRolesDeadlineHint>
-    <LBProbePorts>
-      <Port>16001</Port>
-    </LBProbePorts>
-    <ExpectHealthReport>FALSE</ExpectHealthReport>
-  </Machine>
-  <Container>
-    <ContainerId>{container_id}</ContainerId>
-    <RoleInstanceList>
-      <RoleInstance>
-        <InstanceId>{instance_id}</InstanceId>
-        <State>Started</State>
-        <Configuration>
-          <HostingEnvironmentConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=hostingEnvironmentConfig&amp;incarnation=1</HostingEnvironmentConfig>
-          <SharedConfig>{shared_config_url}</SharedConfig>
-          <ExtensionsConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=extensionsConfig&amp;incarnation=1</ExtensionsConfig>
-          <FullConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=fullConfig&amp;incarnation=1</FullConfig>
-          <Certificates>{certificates_url}</Certificates>
-          <ConfigName>68ce47b32ea94952be7b20951c383628.0.68ce47b32ea94952be7b20951c383628.0.utl-trusty--292258.1.xml</ConfigName>
-        </Configuration>
-      </RoleInstance>
-    </RoleInstanceList>
-  </Container>
-</GoalState>
-"""
 
 
 def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
@@ -610,329 +572,3 @@ class TestReadAzureOvf(TestCase):
         for mypk in mypklist:
             self.assertIn(mypk, cfg['_pubkeys'])
 
-
-class TestReadAzureSharedConfig(unittest.TestCase):
-    def test_valid_content(self):
-        xml = """<?xml version="1.0" encoding="utf-8"?>
-            <SharedConfig>
-             <Deployment name="MY_INSTANCE_ID">
-              <Service name="myservice"/>
-              <ServiceInstance name="INSTANCE_ID.0" guid="{abcd-uuid}" />
-             </Deployment>
-            <Incarnation number="1"/>
-            </SharedConfig>"""
-        ret = DataSourceAzure.iid_from_shared_config_content(xml)
-        self.assertEqual("MY_INSTANCE_ID", ret)
-
-
-class TestFindEndpoint(TestCase):
-
-    def setUp(self):
-        super(TestFindEndpoint, self).setUp()
-        patches = ExitStack()
-        self.addCleanup(patches.close)
-
-        self.load_file = patches.enter_context(
-            mock.patch.object(DataSourceAzure.util, 'load_file'))
-
-    def test_missing_file(self):
-        self.load_file.side_effect = IOError
-        self.assertRaises(IOError,
-                          DataSourceAzure.WALinuxAgentShim.find_endpoint)
-
-    def test_missing_special_azure_line(self):
-        self.load_file.return_value = ''
-        self.assertRaises(Exception,
-                          DataSourceAzure.WALinuxAgentShim.find_endpoint)
-
-    def _build_lease_content(self, ip_address, use_hex=True):
-        ip_address_repr = ':'.join(
-            [hex(int(part)).replace('0x', '')
-             for part in ip_address.split('.')])
-        if not use_hex:
-            ip_address_repr = struct.pack(
-                '>L', int(ip_address_repr.replace(':', ''), 16))
-            ip_address_repr = '"{0}"'.format(ip_address_repr.decode('utf-8'))
-        return '\n'.join([
-            'lease {',
-            ' interface "eth0";',
-            ' option unknown-245 {0};'.format(ip_address_repr),
-            '}'])
-
-    def test_hex_string(self):
-        ip_address = '98.76.54.32'
-        file_content = self._build_lease_content(ip_address)
-        self.load_file.return_value = file_content
-        self.assertEqual(ip_address,
-                         DataSourceAzure.WALinuxAgentShim.find_endpoint())
-
-    def test_hex_string_with_single_character_part(self):
-        ip_address = '4.3.2.1'
-        file_content = self._build_lease_content(ip_address)
-        self.load_file.return_value = file_content
-        self.assertEqual(ip_address,
-                         DataSourceAzure.WALinuxAgentShim.find_endpoint())
-
-    def test_packed_string(self):
-        ip_address = '98.76.54.32'
-        file_content = self._build_lease_content(ip_address, use_hex=False)
-        self.load_file.return_value = file_content
-        self.assertEqual(ip_address,
-                         DataSourceAzure.WALinuxAgentShim.find_endpoint())
-
-    def test_latest_lease_used(self):
-        ip_addresses = ['4.3.2.1', '98.76.54.32']
-        file_content = '\n'.join([self._build_lease_content(ip_address)
-                                  for ip_address in ip_addresses])
-        self.load_file.return_value = file_content
-        self.assertEqual(ip_addresses[-1],
-                         DataSourceAzure.WALinuxAgentShim.find_endpoint())
-
-
-class TestGoalStateParsing(TestCase):
-
-    default_parameters = {
-        'incarnation': 1,
-        'container_id': 'MyContainerId',
-        'instance_id': 'MyInstanceId',
-        'shared_config_url': 'MySharedConfigUrl',
-        'certificates_url': 'MyCertificatesUrl',
-    }
-
-    def _get_goal_state(self, http_client=None, **kwargs):
-        if http_client is None:
-            http_client = mock.MagicMock()
-        parameters = self.default_parameters.copy()
-        parameters.update(kwargs)
-        xml = GOAL_STATE_TEMPLATE.format(**parameters)
-        if parameters['certificates_url'] is None:
-            new_xml_lines = []
-            for line in xml.splitlines():
-                if 'Certificates' in line:
-                    continue
-                new_xml_lines.append(line)
-            xml = '\n'.join(new_xml_lines)
-        return DataSourceAzure.GoalState(xml, http_client)
-
-    def test_incarnation_parsed_correctly(self):
-        incarnation = '123'
-        goal_state = self._get_goal_state(incarnation=incarnation)
-        self.assertEqual(incarnation, goal_state.incarnation)
-
-    def test_container_id_parsed_correctly(self):
-        container_id = 'TestContainerId'
-        goal_state = self._get_goal_state(container_id=container_id)
-        self.assertEqual(container_id, goal_state.container_id)
-
-    def test_instance_id_parsed_correctly(self):
-        instance_id = 'TestInstanceId'
-        goal_state = self._get_goal_state(instance_id=instance_id)
-        self.assertEqual(instance_id, goal_state.instance_id)
-
-    def test_shared_config_xml_parsed_and_fetched_correctly(self):
-        http_client = mock.MagicMock()
-        shared_config_url = 'TestSharedConfigUrl'
-        goal_state = self._get_goal_state(
-            http_client=http_client, shared_config_url=shared_config_url)
-        shared_config_xml = goal_state.shared_config_xml
-        self.assertEqual(1, http_client.get.call_count)
-        self.assertEqual(shared_config_url, http_client.get.call_args[0][0])
-        self.assertEqual(http_client.get.return_value.contents,
-                         shared_config_xml)
-
-    def test_certificates_xml_parsed_and_fetched_correctly(self):
-        http_client = mock.MagicMock()
-        certificates_url = 'TestSharedConfigUrl'
-        goal_state = self._get_goal_state(
-            http_client=http_client, certificates_url=certificates_url)
-        certificates_xml = goal_state.certificates_xml
-        self.assertEqual(1, http_client.get.call_count)
-        self.assertEqual(certificates_url, http_client.get.call_args[0][0])
-        self.assertTrue(http_client.get.call_args[1].get('secure', False))
-        self.assertEqual(http_client.get.return_value.contents,
-                         certificates_xml)
-
-    def test_missing_certificates_skips_http_get(self):
-        http_client = mock.MagicMock()
-        goal_state = self._get_goal_state(
-            http_client=http_client, certificates_url=None)
-        certificates_xml = goal_state.certificates_xml
-        self.assertEqual(0, http_client.get.call_count)
-        self.assertIsNone(certificates_xml)
-
-
-class TestAzureEndpointHttpClient(TestCase):
-
-    regular_headers = {
-        'x-ms-agent-name': 'WALinuxAgent',
-        'x-ms-version': '2012-11-30',
-    }
-
-    def setUp(self):
-        super(TestAzureEndpointHttpClient, self).setUp()
-        patches = ExitStack()
-        self.addCleanup(patches.close)
-
-        self.read_file_or_url = patches.enter_context(
-            mock.patch.object(DataSourceAzure.util, 'read_file_or_url'))
-
-    def test_non_secure_get(self):
-        client = DataSourceAzure.AzureEndpointHttpClient(mock.MagicMock())
-        url = 'MyTestUrl'
-        response = client.get(url, secure=False)
-        self.assertEqual(1, self.read_file_or_url.call_count)
-        self.assertEqual(self.read_file_or_url.return_value, response)
-        self.assertEqual(mock.call(url, headers=self.regular_headers),
-                         self.read_file_or_url.call_args)
-
-    def test_secure_get(self):
-        url = 'MyTestUrl'
-        certificate = mock.MagicMock()
-        expected_headers = self.regular_headers.copy()
-        expected_headers.update({
-            "x-ms-cipher-name": "DES_EDE3_CBC",
-            "x-ms-guest-agent-public-x509-cert": certificate,
-        })
-        client = DataSourceAzure.AzureEndpointHttpClient(certificate)
-        response = client.get(url, secure=True)
-        self.assertEqual(1, self.read_file_or_url.call_count)
-        self.assertEqual(self.read_file_or_url.return_value, response)
-        self.assertEqual(mock.call(url, headers=expected_headers),
-                         self.read_file_or_url.call_args)
-
-    def test_post(self):
-        data = mock.MagicMock()
-        url = 'MyTestUrl'
-        client = DataSourceAzure.AzureEndpointHttpClient(mock.MagicMock())
-        response = client.post(url, data=data)
-        self.assertEqual(1, self.read_file_or_url.call_count)
-        self.assertEqual(self.read_file_or_url.return_value, response)
-        self.assertEqual(
-            mock.call(url, data=data, headers=self.regular_headers),
-            self.read_file_or_url.call_args)
-
-    def test_post_with_extra_headers(self):
-        url = 'MyTestUrl'
-        client = DataSourceAzure.AzureEndpointHttpClient(mock.MagicMock())
-        extra_headers = {'test': 'header'}
-        client.post(url, extra_headers=extra_headers)
-        self.assertEqual(1, self.read_file_or_url.call_count)
-        expected_headers = self.regular_headers.copy()
-        expected_headers.update(extra_headers)
-        self.assertEqual(
-            mock.call(mock.ANY, data=mock.ANY, headers=expected_headers),
-            self.read_file_or_url.call_args)
-
-
-class TestOpenSSLManager(TestCase):
-
-    def setUp(self):
-        super(TestOpenSSLManager, self).setUp()
-        patches = ExitStack()
-        self.addCleanup(patches.close)
-
-        self.subp = patches.enter_context(
-            mock.patch.object(DataSourceAzure.util, 'subp'))
-
-    @mock.patch.object(DataSourceAzure, 'cd', mock.MagicMock())
-    @mock.patch.object(DataSourceAzure.tempfile, 'TemporaryDirectory')
-    def test_openssl_manager_creates_a_tmpdir(self, TemporaryDirectory):
-        manager = DataSourceAzure.OpenSSLManager()
-        self.assertEqual(TemporaryDirectory.return_value, manager.tmpdir)
-
-    @mock.patch('builtins.open')
-    def test_generate_certificate_uses_tmpdir(self, open):
-        subp_directory = {}
-
-        def capture_directory(*args, **kwargs):
-            subp_directory['path'] = os.getcwd()
-
-        self.subp.side_effect = capture_directory
-        manager = DataSourceAzure.OpenSSLManager()
-        self.assertEqual(manager.tmpdir.name, subp_directory['path'])
-
-
-class TestWALinuxAgentShim(TestCase):
-
-    def setUp(self):
-        super(TestWALinuxAgentShim, self).setUp()
-        patches = ExitStack()
-        self.addCleanup(patches.close)
-
-        self.AzureEndpointHttpClient = patches.enter_context(
-            mock.patch.object(DataSourceAzure, 'AzureEndpointHttpClient'))
-        self.find_endpoint = patches.enter_context(
-            mock.patch.object(
-                DataSourceAzure.WALinuxAgentShim, 'find_endpoint'))
-        self.GoalState = patches.enter_context(
-            mock.patch.object(DataSourceAzure, 'GoalState'))
-        self.iid_from_shared_config_content = patches.enter_context(
-            mock.patch.object(DataSourceAzure,
-                              'iid_from_shared_config_content'))
-        self.OpenSSLManager = patches.enter_context(
-            mock.patch.object(DataSourceAzure, 'OpenSSLManager'))
-
-    def test_http_client_uses_certificate(self):
-        shim = DataSourceAzure.WALinuxAgentShim()
-        self.assertEqual(
-            [mock.call(self.OpenSSLManager.return_value.certificate)],
-            self.AzureEndpointHttpClient.call_args_list)
-        self.assertEqual(self.AzureEndpointHttpClient.return_value,
-                         shim.http_client)
-
-    def test_correct_url_used_for_goalstate(self):
-        self.find_endpoint.return_value = 'test_endpoint'
-        shim = DataSourceAzure.WALinuxAgentShim()
-        shim.register_with_azure_and_fetch_data()
-        get = self.AzureEndpointHttpClient.return_value.get
-        self.assertEqual(
-            [mock.call('http://test_endpoint/machine/?comp=goalstate')],
-            get.call_args_list)
-        self.assertEqual(
-            [mock.call(get.return_value.contents, shim.http_client)],
-            self.GoalState.call_args_list)
-
-    def test_certificates_used_to_determine_public_keys(self):
-        shim = DataSourceAzure.WALinuxAgentShim()
-        data = shim.register_with_azure_and_fetch_data()
-        self.assertEqual(
-            [mock.call(self.GoalState.return_value.certificates_xml)],
-            self.OpenSSLManager.return_value.parse_certificates.call_args_list)
-        self.assertEqual(
-            self.OpenSSLManager.return_value.parse_certificates.return_value,
-            data['public-keys'])
-
-    def test_absent_certificates_produces_empty_public_keys(self):
-        self.GoalState.return_value.certificates_xml = None
-        shim = DataSourceAzure.WALinuxAgentShim()
-        data = shim.register_with_azure_and_fetch_data()
-        self.assertEqual([], data['public-keys'])
-
-    def test_instance_id_returned_in_data(self):
-        shim = DataSourceAzure.WALinuxAgentShim()
-        data = shim.register_with_azure_and_fetch_data()
-        self.assertEqual(
-            [mock.call(self.GoalState.return_value.shared_config_xml)],
-            self.iid_from_shared_config_content.call_args_list)
-        self.assertEqual(self.iid_from_shared_config_content.return_value,
-                         data['instance-id'])
-
-    def test_correct_url_used_for_report_ready(self):
-        self.find_endpoint.return_value = 'test_endpoint'
-        shim = DataSourceAzure.WALinuxAgentShim()
-        shim.register_with_azure_and_fetch_data()
-        expected_url = 'http://test_endpoint/machine?comp=health'
-        self.assertEqual(
-            [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
-            shim.http_client.post.call_args_list)
-
-    def test_goal_state_values_used_for_report_ready(self):
-        self.GoalState.return_value.incarnation = 'TestIncarnation'
-        self.GoalState.return_value.container_id = 'TestContainerId'
-        self.GoalState.return_value.instance_id = 'TestInstanceId'
-        shim = DataSourceAzure.WALinuxAgentShim()
-        shim.register_with_azure_and_fetch_data()
-        posted_document = shim.http_client.post.call_args[1]['data']
-        self.assertIn('TestIncarnation', posted_document)
-        self.assertIn('TestContainerId', posted_document)
-        self.assertIn('TestInstanceId', posted_document)
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
new file mode 100644
index 00000000..47b77840
--- /dev/null
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -0,0 +1,377 @@
+import os
+import struct
+import unittest
+
+from cloudinit.sources.helpers import azure as azure_helper
+from ..helpers import TestCase
+
+try:
+    from unittest import mock
+except ImportError:
+    import mock
+
+try:
+    from contextlib import ExitStack
+except ImportError:
+    from contextlib2 import ExitStack
+
+
+GOAL_STATE_TEMPLATE = """\
+<?xml version="1.0" encoding="utf-8"?>
+<GoalState xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="goalstate10.xsd">
+  <Version>2012-11-30</Version>
+  <Incarnation>{incarnation}</Incarnation>
+  <Machine>
+    <ExpectedState>Started</ExpectedState>
+    <StopRolesDeadlineHint>300000</StopRolesDeadlineHint>
+    <LBProbePorts>
+      <Port>16001</Port>
+    </LBProbePorts>
+    <ExpectHealthReport>FALSE</ExpectHealthReport>
+  </Machine>
+  <Container>
+    <ContainerId>{container_id}</ContainerId>
+    <RoleInstanceList>
+      <RoleInstance>
+        <InstanceId>{instance_id}</InstanceId>
+        <State>Started</State>
+        <Configuration>
+          <HostingEnvironmentConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=hostingEnvironmentConfig&amp;incarnation=1</HostingEnvironmentConfig>
+          <SharedConfig>{shared_config_url}</SharedConfig>
+          <ExtensionsConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=extensionsConfig&amp;incarnation=1</ExtensionsConfig>
+          <FullConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=fullConfig&amp;incarnation=1</FullConfig>
+          <Certificates>{certificates_url}</Certificates>
+          <ConfigName>68ce47b32ea94952be7b20951c383628.0.68ce47b32ea94952be7b20951c383628.0.utl-trusty--292258.1.xml</ConfigName>
+        </Configuration>
+      </RoleInstance>
+    </RoleInstanceList>
+  </Container>
+</GoalState>
+"""
+
+
+class TestReadAzureSharedConfig(unittest.TestCase):
+
+    def test_valid_content(self):
+        xml = """<?xml version="1.0" encoding="utf-8"?>
+            <SharedConfig>
+             <Deployment name="MY_INSTANCE_ID">
+              <Service name="myservice"/>
+              <ServiceInstance name="INSTANCE_ID.0" guid="{abcd-uuid}" />
+             </Deployment>
+            <Incarnation number="1"/>
+            </SharedConfig>"""
+        ret = azure_helper.iid_from_shared_config_content(xml)
+        self.assertEqual("MY_INSTANCE_ID", ret)
+
+
+class TestFindEndpoint(TestCase):
+
+    def setUp(self):
+        super(TestFindEndpoint, self).setUp()
+        patches = ExitStack()
+        self.addCleanup(patches.close)
+
+        self.load_file = patches.enter_context(
+            mock.patch.object(azure_helper.util, 'load_file'))
+
+    def test_missing_file(self):
+        self.load_file.side_effect = IOError
+        self.assertRaises(IOError,
+                          azure_helper.WALinuxAgentShim.find_endpoint)
+
+    def test_missing_special_azure_line(self):
+        self.load_file.return_value = ''
+        self.assertRaises(Exception,
+                          azure_helper.WALinuxAgentShim.find_endpoint)
+
+    def _build_lease_content(self, ip_address, use_hex=True):
+        ip_address_repr = ':'.join(
+            [hex(int(part)).replace('0x', '')
+             for part in ip_address.split('.')])
+        if not use_hex:
+            ip_address_repr = struct.pack(
+                '>L', int(ip_address_repr.replace(':', ''), 16))
+            ip_address_repr = '"{0}"'.format(ip_address_repr.decode('utf-8'))
+        return '\n'.join([
+            'lease {',
+            ' interface "eth0";',
+            ' option unknown-245 {0};'.format(ip_address_repr),
+            '}'])
+
+    def test_hex_string(self):
+        ip_address = '98.76.54.32'
+        file_content = self._build_lease_content(ip_address)
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_address,
+                         azure_helper.WALinuxAgentShim.find_endpoint())
+
+    def test_hex_string_with_single_character_part(self):
+        ip_address = '4.3.2.1'
+        file_content = self._build_lease_content(ip_address)
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_address,
+                         azure_helper.WALinuxAgentShim.find_endpoint())
+
+    def test_packed_string(self):
+        ip_address = '98.76.54.32'
+        file_content = self._build_lease_content(ip_address, use_hex=False)
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_address,
+                         azure_helper.WALinuxAgentShim.find_endpoint())
+
+    def test_latest_lease_used(self):
+        ip_addresses = ['4.3.2.1', '98.76.54.32']
+        file_content = '\n'.join([self._build_lease_content(ip_address)
+                                  for ip_address in ip_addresses])
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_addresses[-1],
+                         azure_helper.WALinuxAgentShim.find_endpoint())
+
+
+class TestGoalStateParsing(TestCase):
+
+    default_parameters = {
+        'incarnation': 1,
+        'container_id': 'MyContainerId',
+        'instance_id': 'MyInstanceId',
+        'shared_config_url': 'MySharedConfigUrl',
+        'certificates_url': 'MyCertificatesUrl',
+    }
+
+    def _get_goal_state(self, http_client=None, **kwargs):
+        if http_client is None:
+            http_client = mock.MagicMock()
+        parameters = self.default_parameters.copy()
+        parameters.update(kwargs)
+        xml = GOAL_STATE_TEMPLATE.format(**parameters)
+        if parameters['certificates_url'] is None:
+            new_xml_lines = []
+            for line in xml.splitlines():
+                if 'Certificates' in line:
+                    continue
+                new_xml_lines.append(line)
+            xml = '\n'.join(new_xml_lines)
+        return azure_helper.GoalState(xml, http_client)
+
+    def test_incarnation_parsed_correctly(self):
+        incarnation = '123'
+        goal_state = self._get_goal_state(incarnation=incarnation)
+        self.assertEqual(incarnation, goal_state.incarnation)
+
+    def test_container_id_parsed_correctly(self):
+        container_id = 'TestContainerId'
+        goal_state = self._get_goal_state(container_id=container_id)
+        self.assertEqual(container_id, goal_state.container_id)
+
+    def test_instance_id_parsed_correctly(self):
+        instance_id = 'TestInstanceId'
+        goal_state = self._get_goal_state(instance_id=instance_id)
+        self.assertEqual(instance_id, goal_state.instance_id)
+
+    def test_shared_config_xml_parsed_and_fetched_correctly(self):
+        http_client = mock.MagicMock()
+        shared_config_url = 'TestSharedConfigUrl'
+        goal_state = self._get_goal_state(
+            http_client=http_client, shared_config_url=shared_config_url)
+        shared_config_xml = goal_state.shared_config_xml
+        self.assertEqual(1, http_client.get.call_count)
+        self.assertEqual(shared_config_url, http_client.get.call_args[0][0])
+        self.assertEqual(http_client.get.return_value.contents,
+                         shared_config_xml)
+
+    def test_certificates_xml_parsed_and_fetched_correctly(self):
+        http_client = mock.MagicMock()
+        certificates_url = 'TestSharedConfigUrl'
+        goal_state = self._get_goal_state(
+            http_client=http_client, certificates_url=certificates_url)
+        certificates_xml = goal_state.certificates_xml
+        self.assertEqual(1, http_client.get.call_count)
+        self.assertEqual(certificates_url, http_client.get.call_args[0][0])
+        self.assertTrue(http_client.get.call_args[1].get('secure', False))
+        self.assertEqual(http_client.get.return_value.contents,
+                         certificates_xml)
+
+    def test_missing_certificates_skips_http_get(self):
+        http_client = mock.MagicMock()
+        goal_state = self._get_goal_state(
+            http_client=http_client, certificates_url=None)
+        certificates_xml = goal_state.certificates_xml
+        self.assertEqual(0, http_client.get.call_count)
+        self.assertIsNone(certificates_xml)
+
+
+class TestAzureEndpointHttpClient(TestCase):
+
+    regular_headers = {
+        'x-ms-agent-name': 'WALinuxAgent',
+        'x-ms-version': '2012-11-30',
+    }
+
+    def setUp(self):
+        super(TestAzureEndpointHttpClient, self).setUp()
+        patches = ExitStack()
+        self.addCleanup(patches.close)
+
+        self.read_file_or_url = patches.enter_context(
+            mock.patch.object(azure_helper.util, 'read_file_or_url'))
+
+    def test_non_secure_get(self):
+        client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+        url = 'MyTestUrl'
+        response = client.get(url, secure=False)
+        self.assertEqual(1, self.read_file_or_url.call_count)
+        self.assertEqual(self.read_file_or_url.return_value, response)
+        self.assertEqual(mock.call(url, headers=self.regular_headers),
+                         self.read_file_or_url.call_args)
+
+    def test_secure_get(self):
+        url = 'MyTestUrl'
+        certificate = mock.MagicMock()
+        expected_headers = self.regular_headers.copy()
+        expected_headers.update({
+            "x-ms-cipher-name": "DES_EDE3_CBC",
+            "x-ms-guest-agent-public-x509-cert": certificate,
+        })
+        client = azure_helper.AzureEndpointHttpClient(certificate)
+        response = client.get(url, secure=True)
+        self.assertEqual(1, self.read_file_or_url.call_count)
+        self.assertEqual(self.read_file_or_url.return_value, response)
+        self.assertEqual(mock.call(url, headers=expected_headers),
+                         self.read_file_or_url.call_args)
+
+    def test_post(self):
+        data = mock.MagicMock()
+        url = 'MyTestUrl'
+        client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+        response = client.post(url, data=data)
+        self.assertEqual(1, self.read_file_or_url.call_count)
+        self.assertEqual(self.read_file_or_url.return_value, response)
+        self.assertEqual(
+            mock.call(url, data=data, headers=self.regular_headers),
+            self.read_file_or_url.call_args)
+
+    def test_post_with_extra_headers(self):
+        url = 'MyTestUrl'
+        client = azure_helper.AzureEndpointHttpClient(mock.MagicMock())
+        extra_headers = {'test': 'header'}
+        client.post(url, extra_headers=extra_headers)
+        self.assertEqual(1, self.read_file_or_url.call_count)
+        expected_headers = self.regular_headers.copy()
+        expected_headers.update(extra_headers)
+        self.assertEqual(
+            mock.call(mock.ANY, data=mock.ANY, headers=expected_headers),
+            self.read_file_or_url.call_args)
+
+
+class TestOpenSSLManager(TestCase):
+
+    def setUp(self):
+        super(TestOpenSSLManager, self).setUp()
+        patches = ExitStack()
+        self.addCleanup(patches.close)
+
+        self.subp = patches.enter_context(
+            mock.patch.object(azure_helper.util, 'subp'))
+
+    @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
+    @mock.patch.object(azure_helper.tempfile, 'TemporaryDirectory')
+    def test_openssl_manager_creates_a_tmpdir(self, TemporaryDirectory):
+        manager = azure_helper.OpenSSLManager()
+        self.assertEqual(TemporaryDirectory.return_value, manager.tmpdir)
+
+    @mock.patch('builtins.open')
+    def test_generate_certificate_uses_tmpdir(self, open):
+        subp_directory = {}
+
+        def capture_directory(*args, **kwargs):
+            subp_directory['path'] = os.getcwd()
+
+        self.subp.side_effect = capture_directory
+        manager = azure_helper.OpenSSLManager()
+        self.assertEqual(manager.tmpdir.name, subp_directory['path'])
+
+
+class TestWALinuxAgentShim(TestCase):
+
+    def setUp(self):
+        super(TestWALinuxAgentShim, self).setUp()
+        patches = ExitStack()
+        self.addCleanup(patches.close)
+
+        self.AzureEndpointHttpClient = patches.enter_context(
+            mock.patch.object(azure_helper, 'AzureEndpointHttpClient'))
+        self.find_endpoint = patches.enter_context(
+            mock.patch.object(
+                azure_helper.WALinuxAgentShim, 'find_endpoint'))
+        self.GoalState = patches.enter_context(
+            mock.patch.object(azure_helper, 'GoalState'))
+        self.iid_from_shared_config_content = patches.enter_context(
+            mock.patch.object(azure_helper, 'iid_from_shared_config_content'))
+        self.OpenSSLManager = patches.enter_context(
+            mock.patch.object(azure_helper, 'OpenSSLManager'))
+
+    def test_http_client_uses_certificate(self):
+        shim = azure_helper.WALinuxAgentShim()
+        self.assertEqual(
+            [mock.call(self.OpenSSLManager.return_value.certificate)],
+            self.AzureEndpointHttpClient.call_args_list)
+        self.assertEqual(self.AzureEndpointHttpClient.return_value,
+                         shim.http_client)
+
+    def test_correct_url_used_for_goalstate(self):
+        self.find_endpoint.return_value = 'test_endpoint'
+        shim = azure_helper.WALinuxAgentShim()
+        shim.register_with_azure_and_fetch_data()
+        get = self.AzureEndpointHttpClient.return_value.get
+        self.assertEqual(
+            [mock.call('http://test_endpoint/machine/?comp=goalstate')],
+            get.call_args_list)
+        self.assertEqual(
+            [mock.call(get.return_value.contents, shim.http_client)],
+            self.GoalState.call_args_list)
+
+    def test_certificates_used_to_determine_public_keys(self):
+        shim = azure_helper.WALinuxAgentShim()
+        data = shim.register_with_azure_and_fetch_data()
+        self.assertEqual(
+            [mock.call(self.GoalState.return_value.certificates_xml)],
+            self.OpenSSLManager.return_value.parse_certificates.call_args_list)
+        self.assertEqual(
+            self.OpenSSLManager.return_value.parse_certificates.return_value,
+            data['public-keys'])
+
+    def test_absent_certificates_produces_empty_public_keys(self):
+        self.GoalState.return_value.certificates_xml = None
+        shim = azure_helper.WALinuxAgentShim()
+        data = shim.register_with_azure_and_fetch_data()
+        self.assertEqual([], data['public-keys'])
+
+    def test_instance_id_returned_in_data(self):
+        shim = azure_helper.WALinuxAgentShim()
+        data = shim.register_with_azure_and_fetch_data()
+        self.assertEqual(
+            [mock.call(self.GoalState.return_value.shared_config_xml)],
+            self.iid_from_shared_config_content.call_args_list)
+        self.assertEqual(self.iid_from_shared_config_content.return_value,
+                         data['instance-id'])
+
+    def test_correct_url_used_for_report_ready(self):
+        self.find_endpoint.return_value = 'test_endpoint'
+        shim = azure_helper.WALinuxAgentShim()
+        shim.register_with_azure_and_fetch_data()
+        expected_url = 'http://test_endpoint/machine?comp=health'
+        self.assertEqual(
+            [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
+            shim.http_client.post.call_args_list)
+
+    def test_goal_state_values_used_for_report_ready(self):
+        self.GoalState.return_value.incarnation = 'TestIncarnation'
+        self.GoalState.return_value.container_id = 'TestContainerId'
+        self.GoalState.return_value.instance_id = 'TestInstanceId'
+        shim = azure_helper.WALinuxAgentShim()
+        shim.register_with_azure_and_fetch_data()
+        posted_document = shim.http_client.post.call_args[1]['data']
+        self.assertIn('TestIncarnation', posted_document)
+        self.assertIn('TestContainerId', posted_document)
+        self.assertIn('TestInstanceId', posted_document)
-- 
cgit v1.2.3


From 9c7643c4a0dee7843963709c361b755baf843a4b Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 8 May 2015 13:16:44 +0100
Subject: Stop using Python 3 only tempfile.TemporaryDirectory (but lose free
 cleanup).

---
 cloudinit/sources/helpers/azure.py                   |  8 ++++----
 tests/unittests/test_datasource/test_azure_helper.py | 17 +++++++++++------
 2 files changed, 15 insertions(+), 10 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 60f116e0..cb13187f 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -104,7 +104,7 @@ class OpenSSLManager(object):
     }
 
     def __init__(self):
-        self.tmpdir = tempfile.TemporaryDirectory()
+        self.tmpdir = tempfile.mkdtemp()
         self.certificate = None
         self.generate_certificate()
 
@@ -113,7 +113,7 @@ class OpenSSLManager(object):
         if self.certificate is not None:
             LOG.debug('Certificate already generated.')
             return
-        with cd(self.tmpdir.name):
+        with cd(self.tmpdir):
             util.subp([
                 'openssl', 'req', '-x509', '-nodes', '-subj',
                 '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
@@ -139,7 +139,7 @@ class OpenSSLManager(object):
             b'',
             certificates_content.encode('utf-8'),
         ]
-        with cd(self.tmpdir.name):
+        with cd(self.tmpdir):
             with open('Certificates.p7m', 'wb') as f:
                 f.write(b'\n'.join(lines))
             out, _ = util.subp(
@@ -159,7 +159,7 @@ class OpenSSLManager(object):
                 current = []
         keys = []
         for certificate in certificates:
-            with cd(self.tmpdir.name):
+            with cd(self.tmpdir):
                 public_key, _ = util.subp(
                     'openssl x509 -noout -pubkey |'
                     'ssh-keygen -i -m PKCS8 -f /dev/stdin',
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 47b77840..398a9007 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -273,15 +273,20 @@ class TestOpenSSLManager(TestCase):
 
         self.subp = patches.enter_context(
             mock.patch.object(azure_helper.util, 'subp'))
+        try:
+            self.open = patches.enter_context(
+                mock.patch('__builtin__.open'))
+        except ImportError:
+            self.open = patches.enter_context(
+                mock.patch('builtins.open'))
 
     @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
-    @mock.patch.object(azure_helper.tempfile, 'TemporaryDirectory')
-    def test_openssl_manager_creates_a_tmpdir(self, TemporaryDirectory):
+    @mock.patch.object(azure_helper.tempfile, 'mkdtemp')
+    def test_openssl_manager_creates_a_tmpdir(self, mkdtemp):
         manager = azure_helper.OpenSSLManager()
-        self.assertEqual(TemporaryDirectory.return_value, manager.tmpdir)
+        self.assertEqual(mkdtemp.return_value, manager.tmpdir)
 
-    @mock.patch('builtins.open')
-    def test_generate_certificate_uses_tmpdir(self, open):
+    def test_generate_certificate_uses_tmpdir(self):
         subp_directory = {}
 
         def capture_directory(*args, **kwargs):
@@ -289,7 +294,7 @@ class TestOpenSSLManager(TestCase):
 
         self.subp.side_effect = capture_directory
         manager = azure_helper.OpenSSLManager()
-        self.assertEqual(manager.tmpdir.name, subp_directory['path'])
+        self.assertEqual(manager.tmpdir, subp_directory['path'])
 
 
 class TestWALinuxAgentShim(TestCase):
-- 
cgit v1.2.3


From 84868622c404cda5efd2a753e2de30c1afca49a2 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 8 May 2015 13:18:02 +0100
Subject: Move our walinuxagent implementation to a single function call.

---
 cloudinit/sources/DataSourceAzure.py               |  8 ++--
 cloudinit/sources/helpers/azure.py                 | 31 ++++++++----
 tests/unittests/test_datasource/test_azure.py      | 19 ++++++--
 .../unittests/test_datasource/test_azure_helper.py | 56 ++++++++++++++++++++--
 4 files changed, 92 insertions(+), 22 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 5e147950..4053cfa6 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -29,7 +29,7 @@ from cloudinit.settings import PER_ALWAYS
 from cloudinit import sources
 from cloudinit import util
 from cloudinit.sources.helpers.azure import (
-    iid_from_shared_config_content, WALinuxAgentShim)
+    get_metadata_from_fabric, iid_from_shared_config_content)
 
 LOG = logging.getLogger(__name__)
 
@@ -185,15 +185,13 @@ class DataSourceAzureNet(sources.DataSource):
         write_files(ddir, files, dirmode=0o700)
 
         try:
-            shim = WALinuxAgentShim()
-            data = shim.register_with_azure_and_fetch_data()
+            fabric_data = get_metadata_from_fabric()
         except Exception as exc:
             LOG.info("Error communicating with Azure fabric; assume we aren't"
                      " on Azure.", exc_info=True)
             return False
 
-        self.metadata['instance-id'] = data['instance-id']
-        self.metadata['public-keys'] = data['public-keys']
+        self.metadata.update(fabric_data)
 
         found_ephemeral = find_ephemeral_disk()
         if found_ephemeral:
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index cb13187f..dfdfa7c2 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -108,6 +108,9 @@ class OpenSSLManager(object):
         self.certificate = None
         self.generate_certificate()
 
+    def clean_up(self):
+        util.del_dir(self.tmpdir)
+
     def generate_certificate(self):
         LOG.debug('Generating certificate for communication with fabric...')
         if self.certificate is not None:
@@ -205,11 +208,13 @@ class WALinuxAgentShim(object):
     def __init__(self):
         LOG.debug('WALinuxAgentShim instantiated...')
         self.endpoint = self.find_endpoint()
-        self.openssl_manager = OpenSSLManager()
-        self.http_client = AzureEndpointHttpClient(
-            self.openssl_manager.certificate)
+        self.openssl_manager = None
         self.values = {}
 
+    def clean_up(self):
+        if self.openssl_manager is not None:
+            self.openssl_manager.clean_up()
+
     @staticmethod
     def find_endpoint():
         LOG.debug('Finding Azure endpoint...')
@@ -234,17 +239,19 @@ class WALinuxAgentShim(object):
         return endpoint_ip_address
 
     def register_with_azure_and_fetch_data(self):
+        self.openssl_manager = OpenSSLManager()
+        http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
         LOG.info('Registering with Azure...')
         for i in range(10):
             try:
-                response = self.http_client.get(
+                response = http_client.get(
                     'http://{}/machine/?comp=goalstate'.format(self.endpoint))
             except Exception:
                 time.sleep(i + 1)
             else:
                 break
         LOG.debug('Successfully fetched GoalState XML.')
-        goal_state = GoalState(response.contents, self.http_client)
+        goal_state = GoalState(response.contents, http_client)
         public_keys = []
         if goal_state.certificates_xml is not None:
             LOG.debug('Certificate XML found; parsing out public keys.')
@@ -255,19 +262,27 @@ class WALinuxAgentShim(object):
                 goal_state.shared_config_xml),
             'public-keys': public_keys,
         }
-        self._report_ready(goal_state)
+        self._report_ready(goal_state, http_client)
         return data
 
-    def _report_ready(self, goal_state):
+    def _report_ready(self, goal_state, http_client):
         LOG.debug('Reporting ready to Azure fabric.')
         document = self.REPORT_READY_XML_TEMPLATE.format(
             incarnation=goal_state.incarnation,
             container_id=goal_state.container_id,
             instance_id=goal_state.instance_id,
         )
-        self.http_client.post(
+        http_client.post(
             "http://{}/machine?comp=health".format(self.endpoint),
             data=document,
             extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
         )
         LOG.info('Reported ready to Azure fabric.')
+
+
+def get_metadata_from_fabric():
+    shim = WALinuxAgentShim()
+    try:
+        return shim.register_with_azure_and_fetch_data()
+    finally:
+        shim.clean_up()
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index ee7109e1..983be4cd 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -122,11 +122,10 @@ class TestAzureDataSource(TestCase):
         mod = DataSourceAzure
         mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
 
-        fake_shim = mock.MagicMock()
-        fake_shim().register_with_azure_and_fetch_data.return_value = {
+        self.get_metadata_from_fabric = mock.MagicMock(return_value={
             'instance-id': 'i-my-azure-id',
             'public-keys': [],
-        }
+        })
 
         self.apply_patches([
             (mod, 'list_possible_azure_ds_devs', dsdevs),
@@ -137,7 +136,7 @@ class TestAzureDataSource(TestCase):
             (mod, 'perform_hostname_bounce', mock.MagicMock()),
             (mod, 'get_hostname', mock.MagicMock()),
             (mod, 'set_hostname', mock.MagicMock()),
-            (mod, 'WALinuxAgentShim', fake_shim),
+            (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric),
         ])
 
         dsrc = mod.DataSourceAzureNet(
@@ -388,6 +387,18 @@ class TestAzureDataSource(TestCase):
         self.assertEqual(new_ovfenv,
             load_file(os.path.join(self.waagent_d, 'ovf-env.xml')))
 
+    def test_exception_fetching_fabric_data_doesnt_propagate(self):
+        ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+        self.get_metadata_from_fabric.side_effect = Exception
+        self.assertFalse(ds.get_data())
+
+    def test_fabric_data_included_in_metadata(self):
+        ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+        self.get_metadata_from_fabric.return_value = {'test': 'value'}
+        ret = ds.get_data()
+        self.assertTrue(ret)
+        self.assertEqual('value', ds.metadata['test'])
+
 
 class TestAzureBounce(TestCase):
 
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 398a9007..5fac2ade 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -296,6 +296,14 @@ class TestOpenSSLManager(TestCase):
         manager = azure_helper.OpenSSLManager()
         self.assertEqual(manager.tmpdir, subp_directory['path'])
 
+    @mock.patch.object(azure_helper, 'cd', mock.MagicMock())
+    @mock.patch.object(azure_helper.tempfile, 'mkdtemp', mock.MagicMock())
+    @mock.patch.object(azure_helper.util, 'del_dir')
+    def test_clean_up(self, del_dir):
+        manager = azure_helper.OpenSSLManager()
+        manager.clean_up()
+        self.assertEqual([mock.call(manager.tmpdir)], del_dir.call_args_list)
+
 
 class TestWALinuxAgentShim(TestCase):
 
@@ -318,11 +326,10 @@ class TestWALinuxAgentShim(TestCase):
 
     def test_http_client_uses_certificate(self):
         shim = azure_helper.WALinuxAgentShim()
+        shim.register_with_azure_and_fetch_data()
         self.assertEqual(
             [mock.call(self.OpenSSLManager.return_value.certificate)],
             self.AzureEndpointHttpClient.call_args_list)
-        self.assertEqual(self.AzureEndpointHttpClient.return_value,
-                         shim.http_client)
 
     def test_correct_url_used_for_goalstate(self):
         self.find_endpoint.return_value = 'test_endpoint'
@@ -333,7 +340,8 @@ class TestWALinuxAgentShim(TestCase):
             [mock.call('http://test_endpoint/machine/?comp=goalstate')],
             get.call_args_list)
         self.assertEqual(
-            [mock.call(get.return_value.contents, shim.http_client)],
+            [mock.call(get.return_value.contents,
+                       self.AzureEndpointHttpClient.return_value)],
             self.GoalState.call_args_list)
 
     def test_certificates_used_to_determine_public_keys(self):
@@ -368,7 +376,7 @@ class TestWALinuxAgentShim(TestCase):
         expected_url = 'http://test_endpoint/machine?comp=health'
         self.assertEqual(
             [mock.call(expected_url, data=mock.ANY, extra_headers=mock.ANY)],
-            shim.http_client.post.call_args_list)
+            self.AzureEndpointHttpClient.return_value.post.call_args_list)
 
     def test_goal_state_values_used_for_report_ready(self):
         self.GoalState.return_value.incarnation = 'TestIncarnation'
@@ -376,7 +384,45 @@ class TestWALinuxAgentShim(TestCase):
         self.GoalState.return_value.instance_id = 'TestInstanceId'
         shim = azure_helper.WALinuxAgentShim()
         shim.register_with_azure_and_fetch_data()
-        posted_document = shim.http_client.post.call_args[1]['data']
+        posted_document = (
+            self.AzureEndpointHttpClient.return_value.post.call_args[1]['data']
+        )
         self.assertIn('TestIncarnation', posted_document)
         self.assertIn('TestContainerId', posted_document)
         self.assertIn('TestInstanceId', posted_document)
+
+    def test_clean_up_can_be_called_at_any_time(self):
+        shim = azure_helper.WALinuxAgentShim()
+        shim.clean_up()
+
+    def test_clean_up_will_clean_up_openssl_manager_if_instantiated(self):
+        shim = azure_helper.WALinuxAgentShim()
+        shim.register_with_azure_and_fetch_data()
+        shim.clean_up()
+        self.assertEqual(
+            1, self.OpenSSLManager.return_value.clean_up.call_count)
+
+
+class TestGetMetadataFromFabric(TestCase):
+
+    @mock.patch.object(azure_helper, 'WALinuxAgentShim')
+    def test_data_from_shim_returned(self, shim):
+        ret = azure_helper.get_metadata_from_fabric()
+        self.assertEqual(
+            shim.return_value.register_with_azure_and_fetch_data.return_value,
+            ret)
+
+    @mock.patch.object(azure_helper, 'WALinuxAgentShim')
+    def test_success_calls_clean_up(self, shim):
+        azure_helper.get_metadata_from_fabric()
+        self.assertEqual(1, shim.return_value.clean_up.call_count)
+
+    @mock.patch.object(azure_helper, 'WALinuxAgentShim')
+    def test_failure_in_registration_calls_clean_up(self, shim):
+        class SentinelException(Exception):
+            pass
+        shim.return_value.register_with_azure_and_fetch_data.side_effect = (
+            SentinelException)
+        self.assertRaises(SentinelException,
+                          azure_helper.get_metadata_from_fabric)
+        self.assertEqual(1, shim.return_value.clean_up.call_count)
-- 
cgit v1.2.3


From 1185aeae80fc8279946069bb8eec492b3cb81556 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 8 May 2015 16:22:36 +0100
Subject: Reintroduce original code path.

---
 cloudinit/sources/DataSourceAzure.py          | 74 +++++++++++++++++++++------
 tests/unittests/test_datasource/test_azure.py |  5 ++
 2 files changed, 63 insertions(+), 16 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 4053cfa6..3c7820a6 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -22,6 +22,7 @@ import crypt
 import fnmatch
 import os
 import os.path
+import time
 from xml.dom import minidom
 
 from cloudinit import log as logging
@@ -35,11 +36,13 @@ LOG = logging.getLogger(__name__)
 
 DS_NAME = 'Azure'
 DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
+AGENT_START = ['service', 'walinuxagent', 'start']
 BOUNCE_COMMAND = ['sh', '-xc',
     "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]
 DATA_DIR_CLEAN_LIST = ['SharedConfig.xml']
 
 BUILTIN_DS_CONFIG = {
+    'agent_command': '__builtin__',
     'data_dir': "/var/lib/waagent",
     'set_hostname': True,
     'hostname_bounce': {
@@ -110,6 +113,56 @@ class DataSourceAzureNet(sources.DataSource):
         root = sources.DataSource.__str__(self)
         return "%s [seed=%s]" % (root, self.seed)
 
+    def get_metadata_from_agent(self):
+        temp_hostname = self.metadata.get('local-hostname')
+        hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
+        with temporary_hostname(temp_hostname, self.ds_cfg,
+                                hostname_command=hostname_command) \
+                as previous_hostname:
+            if (previous_hostname is not None
+                    and util.is_true(self.ds_cfg.get('set_hostname'))):
+                cfg = self.ds_cfg['hostname_bounce']
+                try:
+                    perform_hostname_bounce(hostname=temp_hostname,
+                                            cfg=cfg,
+                                            prev_hostname=previous_hostname)
+                except Exception as e:
+                    LOG.warn("Failed publishing hostname: %s", e)
+                    util.logexc(LOG, "handling set_hostname failed")
+
+            try:
+                invoke_agent(self.ds_cfg['agent_command'])
+            except util.ProcessExecutionError:
+                # claim the datasource even if the command failed
+                util.logexc(LOG, "agent command '%s' failed.",
+                            self.ds_cfg['agent_command'])
+
+            ddir = self.ds_cfg['data_dir']
+            shcfgxml = os.path.join(ddir, "SharedConfig.xml")
+            wait_for = [shcfgxml]
+
+            fp_files = []
+            for pk in self.cfg.get('_pubkeys', []):
+                bname = str(pk['fingerprint'] + ".crt")
+                fp_files += [os.path.join(ddir, bname)]
+
+            missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+                                    func=wait_for_files,
+                                    args=(wait_for + fp_files,))
+        if len(missing):
+            LOG.warn("Did not find files, but going on: %s", missing)
+
+        metadata = {}
+        if shcfgxml in missing:
+            LOG.warn("SharedConfig.xml missing, using static instance-id")
+        else:
+            try:
+                metadata['instance-id'] = iid_from_shared_config(shcfgxml)
+            except ValueError as e:
+                LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)
+        metadata['public-keys'] = pubkeys_from_crt_files(fp_files)
+        return metadata
+
     def get_data(self):
         # azure removes/ejects the cdrom containing the ovf-env.xml
         # file on reboot.  So, in order to successfully reboot we
@@ -162,8 +215,6 @@ class DataSourceAzureNet(sources.DataSource):
         # now update ds_cfg to reflect contents pass in config
         user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
         self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
-        mycfg = self.ds_cfg
-        ddir = mycfg['data_dir']
 
         if found != ddir:
             cached_ovfenv = util.load_file(
@@ -184,8 +235,12 @@ class DataSourceAzureNet(sources.DataSource):
         # the directory to be protected.
         write_files(ddir, files, dirmode=0o700)
 
+        if self.ds_cfg['agent_command'] == '__builtin__':
+            metadata_func = get_metadata_from_fabric
+        else:
+            metadata_func = self.get_metadata_from_agent
         try:
-            fabric_data = get_metadata_from_fabric()
+            fabric_data = metadata_func()
         except Exception as exc:
             LOG.info("Error communicating with Azure fabric; assume we aren't"
                      " on Azure.", exc_info=True)
@@ -567,19 +622,6 @@ def iid_from_shared_config(path):
     return iid_from_shared_config_content(content)
 
 
-def iid_from_shared_config_content(content):
-    """
-    find INSTANCE_ID in:
-    <?xml version="1.0" encoding="utf-8"?>
-    <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
-      <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0">
-        <Service name="..." guid="{00000000-0000-0000-0000-000000000000}" />
-    """
-    dom = minidom.parseString(content)
-    depnode = single_node_at_path(dom, ["SharedConfig", "Deployment"])
-    return depnode.attributes.get('name').value
-
-
 class BrokenAzureDataSource(Exception):
     pass
 
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 983be4cd..c72dc801 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -389,11 +389,13 @@ class TestAzureDataSource(TestCase):
 
     def test_exception_fetching_fabric_data_doesnt_propagate(self):
         ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+        ds.ds_cfg['agent_command'] = '__builtin__'
         self.get_metadata_from_fabric.side_effect = Exception
         self.assertFalse(ds.get_data())
 
     def test_fabric_data_included_in_metadata(self):
         ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+        ds.ds_cfg['agent_command'] = '__builtin__'
         self.get_metadata_from_fabric.return_value = {'test': 'value'}
         ret = ds.get_data()
         self.assertTrue(ret)
@@ -419,6 +421,9 @@ class TestAzureBounce(TestCase):
         self.patches.enter_context(
             mock.patch.object(DataSourceAzure, 'find_ephemeral_part',
                               mock.MagicMock(return_value=None)))
+        self.patches.enter_context(
+            mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric',
+                              mock.MagicMock(return_value={})))
 
     def setUp(self):
         super(TestAzureBounce, self).setUp()
-- 
cgit v1.2.3


From d8a1910ae79478b8976c4950219d37e15640e7e7 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 8 May 2015 16:52:12 +0100
Subject: Default to old code path.

---
 cloudinit/sources/DataSourceAzure.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 3c7820a6..f2388c63 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -42,7 +42,7 @@ BOUNCE_COMMAND = ['sh', '-xc',
 DATA_DIR_CLEAN_LIST = ['SharedConfig.xml']
 
 BUILTIN_DS_CONFIG = {
-    'agent_command': '__builtin__',
+    'agent_command': AGENT_START,
     'data_dir': "/var/lib/waagent",
     'set_hostname': True,
     'hostname_bounce': {
-- 
cgit v1.2.3


From 512eb552e0ca740e1d285dc1b66a56579bcf68ec Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 8 May 2015 16:52:49 +0100
Subject: Fix retrying.

---
 cloudinit/sources/helpers/azure.py                   |  9 +++++++--
 tests/unittests/test_datasource/test_azure_helper.py | 11 +++++++++++
 2 files changed, 18 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index dfdfa7c2..2ce728f5 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -242,14 +242,19 @@ class WALinuxAgentShim(object):
         self.openssl_manager = OpenSSLManager()
         http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
         LOG.info('Registering with Azure...')
-        for i in range(10):
+        attempts = 0
+        while True:
             try:
                 response = http_client.get(
                     'http://{}/machine/?comp=goalstate'.format(self.endpoint))
             except Exception:
-                time.sleep(i + 1)
+                if attempts < 10:
+                    time.sleep(attempts + 1)
+                else:
+                    raise
             else:
                 break
+            attempts += 1
         LOG.debug('Successfully fetched GoalState XML.')
         goal_state = GoalState(response.contents, http_client)
         public_keys = []
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 5fac2ade..23bc997c 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -323,6 +323,8 @@ class TestWALinuxAgentShim(TestCase):
             mock.patch.object(azure_helper, 'iid_from_shared_config_content'))
         self.OpenSSLManager = patches.enter_context(
             mock.patch.object(azure_helper, 'OpenSSLManager'))
+        patches.enter_context(
+            mock.patch.object(azure_helper.time, 'sleep', mock.MagicMock()))
 
     def test_http_client_uses_certificate(self):
         shim = azure_helper.WALinuxAgentShim()
@@ -402,6 +404,15 @@ class TestWALinuxAgentShim(TestCase):
         self.assertEqual(
             1, self.OpenSSLManager.return_value.clean_up.call_count)
 
+    def test_failure_to_fetch_goalstate_bubbles_up(self):
+        class SentinelException(Exception):
+            pass
+        self.AzureEndpointHttpClient.return_value.get.side_effect = (
+            SentinelException)
+        shim = azure_helper.WALinuxAgentShim()
+        self.assertRaises(SentinelException,
+                          shim.register_with_azure_and_fetch_data)
+
 
 class TestGetMetadataFromFabric(TestCase):
 
-- 
cgit v1.2.3


From dad01d2cf14a7e0bdca455040fb5a173775cefdc Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 8 May 2015 16:52:58 +0100
Subject: Python 2.6 fixes.

---
 cloudinit/sources/helpers/azure.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 2ce728f5..281d733e 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -246,7 +246,7 @@ class WALinuxAgentShim(object):
         while True:
             try:
                 response = http_client.get(
-                    'http://{}/machine/?comp=goalstate'.format(self.endpoint))
+                    'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
             except Exception:
                 if attempts < 10:
                     time.sleep(attempts + 1)
@@ -278,7 +278,7 @@ class WALinuxAgentShim(object):
             instance_id=goal_state.instance_id,
         )
         http_client.post(
-            "http://{}/machine?comp=health".format(self.endpoint),
+            "http://{0}/machine?comp=health".format(self.endpoint),
             data=document,
             extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
         )
-- 
cgit v1.2.3


From 8b8a90372058205496f42abf2a3d0dc04c7eab3f Mon Sep 17 00:00:00 2001
From: Brent Baude <bbaude@redhat.com>
Date: Thu, 14 May 2015 14:29:42 -0500
Subject: This patch adds a cloud-init plugin for helping users register and
 subscribe their RHEL based systems.  As inputs, it can take:

- user and password OR activation key and org | requires one of the
    two pairs
- auto-attach: True or False | optional
- service-level: <string>  | optional
- add-pool [list, of, pool, ids] | optional
- enable-repos [list, of, yum, repos, to, enable] | optional
- disable-repos [list, of, yum, repos, to, disable] | optional

You can also pass the following to influence your registration via rhsm.conf:

- rhsm-baseurl | optional
- server-hostname | optional
---
 cloudinit/config/cc_rh_subscription.py | 399 +++++++++++++++++++++++++++++++++
 1 file changed, 399 insertions(+)
 create mode 100644 cloudinit/config/cc_rh_subscription.py

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
new file mode 100644
index 00000000..b8056dbb
--- /dev/null
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -0,0 +1,399 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) Red Hat, Inc.
+#
+#    Author: Brent Baude <bbaude@redhat.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import os
+import subprocess
+import itertools
+
+
+def handle(_name, cfg, _cloud, log, _args):
+    sm = SubscriptionManager(cfg)
+    sm.log = log
+
+    if not sm.is_registered:
+        try:
+            verify, verify_msg = sm._verify_keys()
+            if verify is not True:
+                raise SubscriptionError(verify_msg)
+            cont = sm.rhn_register()
+            if not cont:
+                raise SubscriptionError("Registration failed or did not "
+                                        "run completely")
+
+            # Splitting up the registration, auto-attach, and servicelevel
+            # commands because the error codes, messages from subman are not
+            # specific enough.
+
+            # Attempt to change the service level
+            if sm.auto_attach and sm.servicelevel is not None:
+                    if not sm._set_service_level():
+                        raise SubscriptionError("Setting of service-level "
+                                                "failed")
+                    else:
+                        sm.log.info("Completed auto-attach with service level")
+            elif sm.auto_attach:
+                if not sm._set_auto_attach():
+                    raise SubscriptionError("Setting auto-attach failed")
+                else:
+                    sm.log.info("Completed auto-attach")
+
+            if sm.pools is not None:
+                if type(sm.pools) is not list:
+                    raise SubscriptionError("Pools must in the format of a "
+                                            "list.")
+                return_stat = sm.addPool(sm.pools)
+                if not return_stat:
+                    raise SubscriptionError("Unable to attach pools {0}"
+                                            .format(sm.pools))
+            if (sm.enable_repo is not None) or (sm.disable_repo is not None):
+                return_stat = sm.update_repos(sm.enable_repo, sm.disable_repo)
+                if not return_stat:
+                    raise SubscriptionError("Unable to add or remove repos")
+            sm.log.info("rh_subscription plugin completed successfully")
+        except SubscriptionError as e:
+            sm.log.warn(e)
+            sm.log.info("rh_subscription plugin did not complete successfully")
+    else:
+        sm.log.info("System is already registered")
+
+
+class SubscriptionError(Exception):
+    pass
+
+
+class SubscriptionManager(object):
+    def __init__(self, cfg):
+        self.cfg = cfg
+        self.rhel_cfg = self.cfg.get('rh_subscription', {})
+        self.rhsm_baseurl = self.rhel_cfg.get('rhsm-baseurl')
+        self.server_hostname = self.rhel_cfg.get('server-hostname')
+        self.pools = self.rhel_cfg.get('add-pool')
+        self.activation_key = self.rhel_cfg.get('activation-key')
+        self.org = self.rhel_cfg.get('org')
+        self.userid = self.rhel_cfg.get('username')
+        self.password = self.rhel_cfg.get('password')
+        self.auto_attach = self.rhel_cfg.get('auto-attach')
+        self.enable_repo = self.rhel_cfg.get('enable-repo')
+        self.disable_repo = self.rhel_cfg.get('disable-repo')
+        self.servicelevel = self.rhel_cfg.get('service-level')
+        self.subman = ['/bin/subscription-manager']
+        self.valid_rh_keys = ['org', 'activation-key', 'username', 'password',
+                              'disable-repo', 'enable-repo', 'add-pool',
+                              'rhsm-baseurl', 'server-hostname',
+                              'auto-attach', 'service-level']
+        self.is_registered = self._is_registered()
+
+    def _verify_keys(self):
+        '''
+        Checks that the keys in the rh_subscription dict from the user-data
+        are what we expect.
+        '''
+
+        for k in self.rhel_cfg:
+            if k not in self.valid_rh_keys:
+                bad_key = "{0} is not a valid key for rh_subscription. "\
+                          "Valid keys are: "\
+                          "{1}".format(k, ', '.join(self.valid_rh_keys))
+                return False, bad_key
+
+        # Check for bad auto-attach value
+        if (self.auto_attach is not None) and \
+                (str(self.auto_attach).upper() not in ['TRUE', 'FALSE']):
+            not_bool = "The key auto-attach must be a value of "\
+                       "either True or False"
+            return False, not_bool
+
+        if (self.servicelevel is not None) and \
+            ((not self.auto_attach) or
+                (str(self.auto_attach).upper() == "FALSE")):
+
+            no_auto = "The service-level key must be used in conjunction with "\
+                      "the auto-attach key.  Please re-run with auto-attach: "\
+                      "True"
+            return False, no_auto
+        return True, None
+
+    def _is_registered(self):
+        '''
+        Checks if the system is already registered and returns
+        True if so, else False
+        '''
+        cmd = list(itertools.chain(self.subman, ['identity']))
+
+        if subprocess.call(cmd, stdout=open(os.devnull, 'wb'),
+                           stderr=open(os.devnull, 'wb')) == 1:
+            return False
+        else:
+            return True
+
+    def rhn_register(self):
+        '''
+        Registers the system by userid and password or activation key
+        and org.  Returns True when successful False when not.
+        '''
+
+        if (self.activation_key is not None) and (self.org is not None):
+            # register by activation key
+            cmd = list(itertools.chain(self.subman, ['register',
+                                       '--activationkey={0}'.
+                       format(self.activation_key),
+                       '--org={0}'.format(self.org)]))
+
+            # If the baseurl and/or server url are passed in, we register
+            # with them.
+
+            if self.rhsm_baseurl is not None:
+                cmd.append("--baseurl={0}".format(self.rhsm_baseurl))
+
+            if self.server_hostname is not None:
+                cmd.append("--serverurl={0}".format(self.server_hostname))
+
+            return_msg, return_code = self._captureRun(cmd)
+
+            if return_code is not 0:
+                self.log.warn("Registration with {0} and {1} failed.".format(
+                              self.activation_key, self.org))
+                return False
+
+        elif (self.userid is not None) and (self.password is not None):
+            # register by username and password
+            cmd = list(itertools.chain(self.subman, ['register',
+                       '--username={0}'.format(self.userid),
+                       '--password={0}'.format(self.password)]))
+
+            # If the baseurl and/or server url are passed in, we register
+            # with them.
+
+            if self.rhsm_baseurl is not None:
+                cmd.append("--baseurl={0}".format(self.rhsm_baseurl))
+
+            if self.server_hostname is not None:
+                cmd.append("--serverurl={0}".format(self.server_hostname))
+
+            # Attempting to register the system only
+            return_msg, return_code = self._captureRun(cmd)
+
+            if return_code is not 0:
+                # Return message is in a set
+                if return_msg[0] == "":
+                    self.log.warn("Registration failed")
+                    if return_msg[1] is not "":
+                        self.log.warn(return_msg[1])
+                return False
+
+        else:
+            self.log.warn("Unable to register system due to incomplete "
+                          "information.")
+            self.log.warn("Use either activationkey and org *or* userid "
+                          "and password")
+            return False
+
+        reg_id = return_msg[0].split("ID: ")[1].rstrip()
+        self.log.info("Registered successfully with ID {0}".format(reg_id))
+        return True
+
+    def _set_service_level(self):
+        cmd = list(itertools.chain(self.subman,
+                                   ['attach', '--auto', '--servicelevel={0}'
+                                    .format(self.servicelevel)]))
+
+        return_msg, return_code = self._captureRun(cmd)
+
+        if return_code is not 0:
+            self.log.warn("Setting the service level failed with: "
+                          "{0}".format(return_msg[1].strip()))
+            return False
+        else:
+            for line in return_msg[0].split("\n"):
+                if line is not "":
+                    self.log.info(line)
+            return True
+
+    def _set_auto_attach(self):
+        cmd = list(itertools.chain(self.subman, ['attach', '--auto']))
+        return_msg, return_code = self._captureRun(cmd)
+
+        if return_code is not 0:
+            self.log.warn("Auto-attach failed with: "
+                          "{0}]".format(return_msg[1].strip()))
+            return False
+        else:
+            for line in return_msg[0].split("\n"):
+                if line is not "":
+                    self.log.info(line)
+            return True
+
+    def _captureRun(self, cmd):
+        '''
+        Subprocess command that captures and returns the output and
+        return code.
+        '''
+
+        r = subprocess.Popen(cmd, stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        return r.communicate(), r.returncode
+
+    def _getPools(self):
+        '''
+        Gets the list pools for the active subscription and returns them
+        in list form.
+        '''
+        available = []
+        consumed = []
+
+        # Get all available pools
+        cmd = list(itertools.chain(self.subman, ['list', '--available',
+                                                 '--pool-only']))
+        results = subprocess.check_output(cmd)
+        available = (results.rstrip()).split("\n")
+
+        # Get all available pools
+        cmd = list(itertools.chain(self.subman, ['list', '--consumed',
+                                                 '--pool-only']))
+        results = subprocess.check_output(cmd)
+        consumed = (results.rstrip()).split("\n")
+        return available, consumed
+
+    def _getRepos(self):
+        '''
+        Obtains the current list of active yum repositories and returns
+        them in list form.
+        '''
+
+        cmd = list(itertools.chain(self.subman, ['repos', '--list-enabled']))
+        result, return_code = self._captureRun(cmd)
+
+        active_repos = []
+        for repo in result[0].split("\n"):
+            if "Repo ID:" in repo:
+                active_repos.append((repo.split(':')[1]).strip())
+
+        cmd = list(itertools.chain(self.subman, ['repos', '--list-disabled']))
+        result, return_code = self._captureRun(cmd)
+
+        inactive_repos = []
+        for repo in result[0].split("\n"):
+            if "Repo ID:" in repo:
+                inactive_repos.append((repo.split(':')[1]).strip())
+
+        return active_repos, inactive_repos
+
+    def addPool(self, pools):
+        '''
+        Takes a list of subscription pools and "attaches" them to the
+        current subscription
+        '''
+
+        # An empty list was passed
+        if len(pools) == 0:
+            self.log.info("No pools to attach")
+            return True
+
+        pool_available, pool_consumed = self._getPools()
+        pool_list = []
+        cmd = list(itertools.chain(self.subman, ['attach']))
+        for pool in pools:
+            if (pool not in pool_consumed) and (pool in pool_available):
+                pool_list.append('--pool={0}'.format(pool))
+            else:
+                self.log.warn("Pool {0} is not available".format(pool))
+        if len(pool_list) > 0:
+            cmd.extend(pool_list)
+            try:
+                self._captureRun(cmd)
+                self.log.info("Attached the following pools to your "
+                              "system: %s" % (", ".join(pool_list))
+                              .replace('--pool=', ''))
+                return True
+            except subprocess.CalledProcessError:
+                self.log.warn("Unable to attach pool {0}".format(pool))
+                return False
+
+    def update_repos(self, erepos, drepos):
+        '''
+        Takes a list of yum repo ids that need to be disabled or enabled; then
+        it verifies if they are already enabled or disabled and finally
+        executes the action to disable or enable
+        '''
+
+        if (erepos is not None) and (type(erepos) is not list):
+            self.log.warn("Repo IDs must in the format of a list.")
+            return False
+
+        if (drepos is not None) and (type(drepos) is not list):
+            self.log.warn("Repo IDs must in the format of a list.")
+            return False
+
+        # Bail if both lists are not populated
+        if (len(erepos) == 0) and (len(drepos) == 0):
+            self.log.info("No repo IDs to enable or disable")
+            return True
+
+        active_repos, inactive_repos = self._getRepos()
+        # Creating a list of repoids to be enabled
+        enable_list = []
+        enable_list_fail = []
+        for repoid in erepos:
+            if (repoid in inactive_repos):
+                enable_list.append("--enable={0}".format(repoid))
+            else:
+                enable_list_fail.append(repoid)
+
+        # Creating a list of repoids to be disabled
+        disable_list = []
+        disable_list_fail = []
+        for repoid in drepos:
+            if repoid in active_repos:
+                disable_list.append("--disable={0}".format(repoid))
+            else:
+                disable_list_fail.append(repoid)
+
+        # Logging any repos that are already enabled or disabled
+        if len(enable_list_fail) > 0:
+            for fail in enable_list_fail:
+                # Check if the repo exists or not
+                if fail in active_repos:
+                    self.log.info("Repo {0} is already enabled".format(fail))
+                else:
+                    self.log.warn("Repo {0} does not appear to "
+                                  "exist".format(fail))
+        if len(disable_list_fail) > 0:
+            for fail in disable_list_fail:
+                self.log.info("Repo {0} not disabled "
+                              "because it is not enabled".format(fail))
+
+        cmd = list(itertools.chain(self.subman, ['repos']))
+        if enable_list > 0:
+            cmd.extend(enable_list)
+        if disable_list > 0:
+            cmd.extend(disable_list)
+
+        try:
+            return_msg, return_code = self._captureRun(cmd)
+
+        except subprocess.CalledProcessError as e:
+            self.log.warn("Unable to alter repos due to {0}".format(e))
+            return False
+
+        if enable_list > 0:
+            self.log.info("Enabled the following repos: %s" %
+                          (", ".join(enable_list)).replace('--enable=', ''))
+        if disable_list > 0:
+            self.log.info("Disabled the following repos: %s" %
+                          (", ".join(disable_list)).replace('--disable=', ''))
+        return True
-- 
cgit v1.2.3


From 74023961b70a178039ecf10f68745f6927113978 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 14 May 2015 17:06:39 -0400
Subject: read_seeded: fix read_seeded after regression

read_seeded was assuming a Response object back from load_tfile_or_url
but load_tfile_or_url was returning string.

since the only other user of this was a test, move load_tfile_or_url to
a test, and just do the right thing in read_seeded.

LP: #1455233
---
 cloudinit/util.py                                       |  8 ++------
 .../test_handler/test_handler_apt_configure.py          | 15 +++++++++------
 tests/unittests/test_util.py                            | 17 +++++++++++++++++
 3 files changed, 28 insertions(+), 12 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index cae57770..db4e02b8 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -766,10 +766,6 @@ def fetch_ssl_details(paths=None):
     return ssl_details
 
 
-def load_tfile_or_url(*args, **kwargs):
-    return(decode_binary(read_file_or_url(*args, **kwargs).contents))
-
-
 def read_file_or_url(url, timeout=5, retries=10,
                      headers=None, data=None, sec_between=1, ssl_details=None,
                      headers_cb=None, exception_cb=None):
@@ -837,10 +833,10 @@ def read_seeded(base="", ext="", timeout=5, retries=10, file_retries=0):
         ud_url = "%s%s%s" % (base, "user-data", ext)
         md_url = "%s%s%s" % (base, "meta-data", ext)
 
-    md_resp = load_tfile_or_url(md_url, timeout, retries, file_retries)
+    md_resp = read_file_or_url(md_url, timeout, retries, file_retries)
     md = None
     if md_resp.ok():
-        md = load_yaml(md_resp.contents, default={})
+        md = load_yaml(decode_binary(md_resp.contents), default={})
 
     ud_resp = read_file_or_url(ud_url, timeout, retries, file_retries)
     ud = None
diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py
index 895728b3..4a74ea47 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure.py
@@ -8,6 +8,9 @@ import re
 import shutil
 import tempfile
 
+def load_tfile_or_url(*args, **kwargs):
+    return(util.decode_binary(util.read_file_or_url(*args, **kwargs).contents))
+
 
 class TestAptProxyConfig(TestCase):
     def setUp(self):
@@ -29,7 +32,7 @@ class TestAptProxyConfig(TestCase):
         self.assertTrue(os.path.isfile(self.pfile))
         self.assertFalse(os.path.isfile(self.cfile))
 
-        contents = util.load_tfile_or_url(self.pfile)
+        contents = load_tfile_or_url(self.pfile)
         self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
 
     def test_apt_http_proxy_written(self):
@@ -39,7 +42,7 @@ class TestAptProxyConfig(TestCase):
         self.assertTrue(os.path.isfile(self.pfile))
         self.assertFalse(os.path.isfile(self.cfile))
 
-        contents = util.load_tfile_or_url(self.pfile)
+        contents = load_tfile_or_url(self.pfile)
         self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
 
     def test_apt_all_proxy_written(self):
@@ -57,7 +60,7 @@ class TestAptProxyConfig(TestCase):
         self.assertTrue(os.path.isfile(self.pfile))
         self.assertFalse(os.path.isfile(self.cfile))
 
-        contents = util.load_tfile_or_url(self.pfile)
+        contents = load_tfile_or_url(self.pfile)
 
         for ptype, pval in values.items():
             self.assertTrue(self._search_apt_config(contents, ptype, pval))
@@ -73,7 +76,7 @@ class TestAptProxyConfig(TestCase):
         cc_apt_configure.apply_apt_config({'apt_proxy': "foo"},
                                           self.pfile, self.cfile)
         self.assertTrue(os.path.isfile(self.pfile))
-        contents = util.load_tfile_or_url(self.pfile)
+        contents = load_tfile_or_url(self.pfile)
         self.assertTrue(self._search_apt_config(contents, "http", "foo"))
 
     def test_config_written(self):
@@ -85,14 +88,14 @@ class TestAptProxyConfig(TestCase):
         self.assertTrue(os.path.isfile(self.cfile))
         self.assertFalse(os.path.isfile(self.pfile))
 
-        self.assertEqual(util.load_tfile_or_url(self.cfile), payload)
+        self.assertEqual(load_tfile_or_url(self.cfile), payload)
 
     def test_config_replaced(self):
         util.write_file(self.pfile, "content doesnt matter")
         cc_apt_configure.apply_apt_config({'apt_config': "foo"},
                                           self.pfile, self.cfile)
         self.assertTrue(os.path.isfile(self.cfile))
-        self.assertEqual(util.load_tfile_or_url(self.cfile), "foo")
+        self.assertEqual(load_tfile_or_url(self.cfile), "foo")
 
     def test_config_deleted(self):
         # if no 'apt_config' is provided, delete any previously written file
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 1619b5d2..95990165 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -459,4 +459,21 @@ class TestMessageFromString(helpers.TestCase):
         roundtripped = util.message_from_string(u'\n').as_string()
         self.assertNotIn('\x00', roundtripped)
 
+
+class TestReadSeeded(helpers.TestCase):
+    def setUp(self):
+        super(TestReadSeeded, self).setUp()
+        self.tmp = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.tmp)
+
+    def test_unicode_not_messed_up(self):
+        ud = b"userdatablob"
+        helpers.populate_dir(
+            self.tmp, {'meta-data': "key1: val1", 'user-data': ud})
+        sdir = self.tmp + os.path.sep
+        (found_md, found_ud) = util.read_seeded(sdir)
+
+        self.assertEqual(found_md, {'key1': 'val1'})
+        self.assertEqual(found_ud, ud)
+
 # vi: ts=4 expandtab
-- 
cgit v1.2.3


From 33db529855c0c0746091868c69f5d694bff7b9a8 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 15 May 2015 16:28:24 -0400
Subject: pep8 fixes

---
 cloudinit/distros/rhel.py                                 |  4 ++--
 tests/unittests/test_datasource/test_azure.py             |  1 -
 tests/unittests/test_datasource/test_azure_helper.py      | 15 ++++++++++-----
 .../unittests/test_handler/test_handler_apt_configure.py  |  1 +
 4 files changed, 13 insertions(+), 8 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index eec17c61..30c805a6 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -133,7 +133,7 @@ class Distro(distros.Distro):
         rhel_util.update_sysconfig_file(out_fn, locale_cfg)
 
     def _write_hostname(self, hostname, out_fn):
-        # systemd will never update previous-hostname for us, so 
+        # systemd will never update previous-hostname for us, so
         # we need to do it ourselves
         if self.uses_systemd() and out_fn.endswith('/previous-hostname'):
             util.write_file(out_fn, hostname)
@@ -161,7 +161,7 @@ class Distro(distros.Distro):
 
     def _read_hostname(self, filename, default=None):
         if self.uses_systemd() and filename.endswith('/previous-hostname'):
-            return util.load_file(filename).strip()  
+            return util.load_file(filename).strip()
         elif self.uses_systemd():
             (out, _err) = util.subp(['hostname'])
             if len(out):
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index c72dc801..4c4b8eec 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -587,4 +587,3 @@ class TestReadAzureOvf(TestCase):
         (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content)
         for mypk in mypklist:
             self.assertIn(mypk, cfg['_pubkeys'])
-
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 23bc997c..a5228870 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -18,7 +18,8 @@ except ImportError:
 
 GOAL_STATE_TEMPLATE = """\
 <?xml version="1.0" encoding="utf-8"?>
-<GoalState xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="goalstate10.xsd">
+<GoalState xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:noNamespaceSchemaLocation="goalstate10.xsd">
   <Version>2012-11-30</Version>
   <Incarnation>{incarnation}</Incarnation>
   <Machine>
@@ -36,12 +37,16 @@ GOAL_STATE_TEMPLATE = """\
         <InstanceId>{instance_id}</InstanceId>
         <State>Started</State>
         <Configuration>
-          <HostingEnvironmentConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=hostingEnvironmentConfig&amp;incarnation=1</HostingEnvironmentConfig>
+          <HostingEnvironmentConfig>
+            http://100.86.192.70:80/...hostingEnvironmentConfig...
+          </HostingEnvironmentConfig>
           <SharedConfig>{shared_config_url}</SharedConfig>
-          <ExtensionsConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=extensionsConfig&amp;incarnation=1</ExtensionsConfig>
-          <FullConfig>http://100.86.192.70:80/machine/46504ebc-f968-4f23-b9aa-cd2b3e4d470c/68ce47b32ea94952be7b20951c383628.utl%2Dtrusty%2D%2D292258?comp=config&amp;type=fullConfig&amp;incarnation=1</FullConfig>
+          <ExtensionsConfig>
+            http://100.86.192.70:80/...extensionsConfig...
+          </ExtensionsConfig>
+          <FullConfig>http://100.86.192.70:80/...fullConfig...</FullConfig>
           <Certificates>{certificates_url}</Certificates>
-          <ConfigName>68ce47b32ea94952be7b20951c383628.0.68ce47b32ea94952be7b20951c383628.0.utl-trusty--292258.1.xml</ConfigName>
+          <ConfigName>68ce47.0.68ce47.0.utl-trusty--292258.1.xml</ConfigName>
         </Configuration>
       </RoleInstance>
     </RoleInstanceList>
diff --git a/tests/unittests/test_handler/test_handler_apt_configure.py b/tests/unittests/test_handler/test_handler_apt_configure.py
index 4a74ea47..1ed185ca 100644
--- a/tests/unittests/test_handler/test_handler_apt_configure.py
+++ b/tests/unittests/test_handler/test_handler_apt_configure.py
@@ -8,6 +8,7 @@ import re
 import shutil
 import tempfile
 
+
 def load_tfile_or_url(*args, **kwargs):
     return(util.decode_binary(util.read_file_or_url(*args, **kwargs).contents))
 
-- 
cgit v1.2.3


From 151ece4efcd6d8f5051e86dff2bcd7d218e50ca2 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 19 May 2015 08:21:34 -0700
Subject: EC2: be aware of eu-central-1 availability zone

eu-central-1 means that 'central' is a direction the regular
expression needs to be updated to understand.

LP: #1456684
---
 ChangeLog                     | 1 +
 cloudinit/distros/__init__.py | 8 ++++++--
 2 files changed, 7 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index 0b4175b7..ff06e989 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -41,6 +41,7 @@
  - Fix exception when running with no arguments on Python 3. [Daniel Watkins]
  - Centos: detect/expect use of systemd on centos 7. [Brian Rak]
  - Azure: remove dependency on walinux-agent [Daniel Watkins]
+ - EC2: know about eu-central-1 availability-zone (LP: #1456684)
 0.7.6:
  - open 0.7.6
  - Enable vendordata on CloudSigma datasource (LP: #1303986)
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 05721922..e0cce670 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -556,8 +556,12 @@ def _get_package_mirror_info(mirror_info, availability_zone=None,
     if not mirror_info:
         mirror_info = {}
 
-    ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" %
-        "north|northeast|east|southeast|south|southwest|west|northwest")
+    # ec2 availability zones are named cc-direction-[0-9][a-d] (us-east-1b)
+    # the region is us-east-1. so region = az[0:-1]
+    directions_re = '|'.join([
+        'central', 'east', 'north', 'northeast', 'northwest',
+        'south', 'southeast', 'southwest', 'west'])
+    ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re)
 
     subst = {}
     if availability_zone:
-- 
cgit v1.2.3


From cf2b017c8bf2adb02a2c7a9c9f03754402cb73c4 Mon Sep 17 00:00:00 2001
From: Brent Baude <bbaude@redhat.com>
Date: Thu, 21 May 2015 13:32:30 -0500
Subject: This commit consists of three things based on feedback from smoser:

cc_rh_subscription: Use of self.log.info limited, uses the util.subp for subprocesses, removed full path for subscription-manager

cloud-config-rh_subscription.txt: A heavily commented example file on how to use rh_subscription and its main keys

test_rh_subscription.py: a set of unittests for rh_subscription
---
 cloudinit/config/cc_rh_subscription.py  | 181 +++++++++++++------------
 tests/unittests/test_rh_subscription.py | 231 ++++++++++++++++++++++++++++++++
 2 files changed, 323 insertions(+), 89 deletions(-)
 create mode 100644 tests/unittests/test_rh_subscription.py

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index b8056dbb..00a88456 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -16,15 +16,13 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import os
-import subprocess
 import itertools
+from cloudinit import util
 
 
 def handle(_name, cfg, _cloud, log, _args):
     sm = SubscriptionManager(cfg)
     sm.log = log
-
     if not sm.is_registered:
         try:
             verify, verify_msg = sm._verify_keys()
@@ -41,21 +39,22 @@ def handle(_name, cfg, _cloud, log, _args):
 
             # Attempt to change the service level
             if sm.auto_attach and sm.servicelevel is not None:
-                    if not sm._set_service_level():
-                        raise SubscriptionError("Setting of service-level "
-                                                "failed")
-                    else:
-                        sm.log.info("Completed auto-attach with service level")
+                if not sm._set_service_level():
+                    raise SubscriptionError("Setting of service-level "
+                                            "failed")
+                else:
+                    sm.log.debug("Completed auto-attach with service level")
             elif sm.auto_attach:
                 if not sm._set_auto_attach():
                     raise SubscriptionError("Setting auto-attach failed")
                 else:
-                    sm.log.info("Completed auto-attach")
+                    sm.log.debug("Completed auto-attach")
 
             if sm.pools is not None:
                 if type(sm.pools) is not list:
-                    raise SubscriptionError("Pools must in the format of a "
-                                            "list.")
+                    pool_fail = "Pools must in the format of a list"
+                    raise SubscriptionError(pool_fail)
+                                            
                 return_stat = sm.addPool(sm.pools)
                 if not return_stat:
                     raise SubscriptionError("Unable to attach pools {0}"
@@ -66,8 +65,8 @@ def handle(_name, cfg, _cloud, log, _args):
                     raise SubscriptionError("Unable to add or remove repos")
             sm.log.info("rh_subscription plugin completed successfully")
         except SubscriptionError as e:
-            sm.log.warn(e)
-            sm.log.info("rh_subscription plugin did not complete successfully")
+            sm.log.warn(str(e))
+            sm.log.warn("rh_subscription plugin did not complete successfully")
     else:
         sm.log.info("System is already registered")
 
@@ -91,7 +90,7 @@ class SubscriptionManager(object):
         self.enable_repo = self.rhel_cfg.get('enable-repo')
         self.disable_repo = self.rhel_cfg.get('disable-repo')
         self.servicelevel = self.rhel_cfg.get('service-level')
-        self.subman = ['/bin/subscription-manager']
+        self.subman = ['subscription-manager']
         self.valid_rh_keys = ['org', 'activation-key', 'username', 'password',
                               'disable-repo', 'enable-repo', 'add-pool',
                               'rhsm-baseurl', 'server-hostname',
@@ -135,11 +134,22 @@ class SubscriptionManager(object):
         '''
         cmd = list(itertools.chain(self.subman, ['identity']))
 
-        if subprocess.call(cmd, stdout=open(os.devnull, 'wb'),
-                           stderr=open(os.devnull, 'wb')) == 1:
+        try:
+            self._sub_man_cli(cmd)
+        except util.ProcessExecutionError:
             return False
-        else:
-            return True
+
+        return True
+
+    def _sub_man_cli(self, cmd, logstring_val=False):
+        '''
+        Uses the prefered cloud-init subprocess def of util.subp
+        and runs subscription-manager.  Breaking this to a
+        separate function for later use in mocking and unittests
+        '''
+        return_out, return_err = util.subp(cmd, logstring=logstring_val)
+
+        return return_out, return_err
 
     def rhn_register(self):
         '''
@@ -163,11 +173,13 @@ class SubscriptionManager(object):
             if self.server_hostname is not None:
                 cmd.append("--serverurl={0}".format(self.server_hostname))
 
-            return_msg, return_code = self._captureRun(cmd)
-
-            if return_code is not 0:
-                self.log.warn("Registration with {0} and {1} failed.".format(
-                              self.activation_key, self.org))
+            try:
+                return_out, return_err = self._sub_man_cli(cmd,
+                                                           logstring_val=True)
+            except util.ProcessExecutionError as e:
+                if e.stdout == "":
+                    self.log.warn("Registration failed due "
+                                  "to: {0}".format(e.stderr))
                 return False
 
         elif (self.userid is not None) and (self.password is not None):
@@ -186,14 +198,13 @@ class SubscriptionManager(object):
                 cmd.append("--serverurl={0}".format(self.server_hostname))
 
             # Attempting to register the system only
-            return_msg, return_code = self._captureRun(cmd)
-
-            if return_code is not 0:
-                # Return message is in a set
-                if return_msg[0] == "":
-                    self.log.warn("Registration failed")
-                    if return_msg[1] is not "":
-                        self.log.warn(return_msg[1])
+            try:
+                return_out, return_err = self._sub_man_cli(cmd,
+                                                           logstring_val=True)
+            except util.ProcessExecutionError as e:
+                if e.stdout == "":
+                    self.log.warn("Registration failed due "
+                                  "to: {0}".format(e.stderr))
                 return False
 
         else:
@@ -203,8 +214,8 @@ class SubscriptionManager(object):
                           "and password")
             return False
 
-        reg_id = return_msg[0].split("ID: ")[1].rstrip()
-        self.log.info("Registered successfully with ID {0}".format(reg_id))
+        reg_id = return_out.split("ID: ")[1].rstrip()
+        self.log.debug("Registered successfully with ID {0}".format(reg_id))
         return True
 
     def _set_service_level(self):
@@ -212,41 +223,34 @@ class SubscriptionManager(object):
                                    ['attach', '--auto', '--servicelevel={0}'
                                     .format(self.servicelevel)]))
 
-        return_msg, return_code = self._captureRun(cmd)
-
-        if return_code is not 0:
-            self.log.warn("Setting the service level failed with: "
-                          "{0}".format(return_msg[1].strip()))
+        try:
+            return_out, return_err = self._sub_man_cli(cmd)
+        except util.ProcessExecutionError as e:
+            if e.stdout.rstrip() != '':
+                for line in e.stdout.split("\n"):
+                    if line is not '':
+                        self.log.warn(line)
+            else:
+                self.log.warn("Setting the service level failed with: "
+                              "{0}".format(e.stderr.strip()))
             return False
-        else:
-            for line in return_msg[0].split("\n"):
-                if line is not "":
-                    self.log.info(line)
-            return True
+        for line in return_out.split("\n"):
+            if line is not "":
+                self.log.debug(line)
+        return True
 
     def _set_auto_attach(self):
         cmd = list(itertools.chain(self.subman, ['attach', '--auto']))
-        return_msg, return_code = self._captureRun(cmd)
-
-        if return_code is not 0:
+        try:
+            return_out, return_err = self._sub_man_cli(cmd)
+        except util.ProcessExecutionError:
             self.log.warn("Auto-attach failed with: "
-                          "{0}]".format(return_msg[1].strip()))
+                          "{0}]".format(return_err.strip()))
             return False
-        else:
-            for line in return_msg[0].split("\n"):
-                if line is not "":
-                    self.log.info(line)
-            return True
-
-    def _captureRun(self, cmd):
-        '''
-        Subprocess command that captures and returns the output and
-        return code.
-        '''
-
-        r = subprocess.Popen(cmd, stdout=subprocess.PIPE,
-                             stderr=subprocess.PIPE)
-        return r.communicate(), r.returncode
+        for line in return_out.split("\n"):
+            if line is not "":
+                self.log.debug(line)
+        return True
 
     def _getPools(self):
         '''
@@ -259,14 +263,15 @@ class SubscriptionManager(object):
         # Get all available pools
         cmd = list(itertools.chain(self.subman, ['list', '--available',
                                                  '--pool-only']))
-        results = subprocess.check_output(cmd)
+        results, errors = self._sub_man_cli(cmd)
         available = (results.rstrip()).split("\n")
 
-        # Get all available pools
+        # Get all consumed pools
         cmd = list(itertools.chain(self.subman, ['list', '--consumed',
                                                  '--pool-only']))
-        results = subprocess.check_output(cmd)
+        results, errors = self._sub_man_cli(cmd)
         consumed = (results.rstrip()).split("\n")
+
         return available, consumed
 
     def _getRepos(self):
@@ -276,21 +281,19 @@ class SubscriptionManager(object):
         '''
 
         cmd = list(itertools.chain(self.subman, ['repos', '--list-enabled']))
-        result, return_code = self._captureRun(cmd)
-
+        return_out, return_err = self._sub_man_cli(cmd)
         active_repos = []
-        for repo in result[0].split("\n"):
+        for repo in return_out.split("\n"):
             if "Repo ID:" in repo:
                 active_repos.append((repo.split(':')[1]).strip())
 
         cmd = list(itertools.chain(self.subman, ['repos', '--list-disabled']))
-        result, return_code = self._captureRun(cmd)
+        return_out, return_err = self._sub_man_cli(cmd)
 
         inactive_repos = []
-        for repo in result[0].split("\n"):
+        for repo in return_out.split("\n"):
             if "Repo ID:" in repo:
                 inactive_repos.append((repo.split(':')[1]).strip())
-
         return active_repos, inactive_repos
 
     def addPool(self, pools):
@@ -301,7 +304,7 @@ class SubscriptionManager(object):
 
         # An empty list was passed
         if len(pools) == 0:
-            self.log.info("No pools to attach")
+            self.log.debug("No pools to attach")
             return True
 
         pool_available, pool_consumed = self._getPools()
@@ -315,13 +318,14 @@ class SubscriptionManager(object):
         if len(pool_list) > 0:
             cmd.extend(pool_list)
             try:
-                self._captureRun(cmd)
-                self.log.info("Attached the following pools to your "
-                              "system: %s" % (", ".join(pool_list))
-                              .replace('--pool=', ''))
+                self._sub_man_cli(cmd)
+                self.log.debug("Attached the following pools to your "
+                               "system: %s" % (", ".join(pool_list))
+                               .replace('--pool=', ''))
                 return True
-            except subprocess.CalledProcessError:
-                self.log.warn("Unable to attach pool {0}".format(pool))
+            except util.ProcessExecutionError as e:
+                self.log.warn("Unable to attach pool {0} "
+                              "due to {1}".format(pool, e))
                 return False
 
     def update_repos(self, erepos, drepos):
@@ -341,7 +345,7 @@ class SubscriptionManager(object):
 
         # Bail if both lists are not populated
         if (len(erepos) == 0) and (len(drepos) == 0):
-            self.log.info("No repo IDs to enable or disable")
+            self.log.debug("No repo IDs to enable or disable")
             return True
 
         active_repos, inactive_repos = self._getRepos()
@@ -368,14 +372,14 @@ class SubscriptionManager(object):
             for fail in enable_list_fail:
                 # Check if the repo exists or not
                 if fail in active_repos:
-                    self.log.info("Repo {0} is already enabled".format(fail))
+                    self.log.debug("Repo {0} is already enabled".format(fail))
                 else:
                     self.log.warn("Repo {0} does not appear to "
                                   "exist".format(fail))
         if len(disable_list_fail) > 0:
             for fail in disable_list_fail:
-                self.log.info("Repo {0} not disabled "
-                              "because it is not enabled".format(fail))
+                self.log.debug("Repo {0} not disabled "
+                               "because it is not enabled".format(fail))
 
         cmd = list(itertools.chain(self.subman, ['repos']))
         if enable_list > 0:
@@ -384,16 +388,15 @@ class SubscriptionManager(object):
             cmd.extend(disable_list)
 
         try:
-            return_msg, return_code = self._captureRun(cmd)
-
-        except subprocess.CalledProcessError as e:
+            self._sub_man_cli(cmd)
+        except util.ProcessExecutionError as e:
             self.log.warn("Unable to alter repos due to {0}".format(e))
             return False
 
         if enable_list > 0:
-            self.log.info("Enabled the following repos: %s" %
-                          (", ".join(enable_list)).replace('--enable=', ''))
+            self.log.debug("Enabled the following repos: %s" %
+                           (", ".join(enable_list)).replace('--enable=', ''))
         if disable_list > 0:
-            self.log.info("Disabled the following repos: %s" %
-                          (", ".join(disable_list)).replace('--disable=', ''))
+            self.log.debug("Disabled the following repos: %s" %
+                           (", ".join(disable_list)).replace('--disable=', ''))
         return True
diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py
new file mode 100644
index 00000000..ba9181ec
--- /dev/null
+++ b/tests/unittests/test_rh_subscription.py
@@ -0,0 +1,231 @@
+from cloudinit import util
+from cloudinit.config import cc_rh_subscription
+import logging
+import mock
+import unittest
+
+
+class GoodTests(unittest.TestCase):
+    def setUp(self):
+        super(GoodTests, self).setUp()
+        self.name = "cc_rh_subscription"
+        self.cloud_init = None
+        self.log = logging.getLogger("good_tests")
+        self.args = []
+        self.handle = cc_rh_subscription.handle
+        self.SM = cc_rh_subscription.SubscriptionManager
+
+        self.config = {'rh_subscription':
+                       {'username': 'scooby@do.com',
+                        'password': 'scooby-snacks'
+                        }}
+        self.config_full = {'rh_subscription':
+                            {'username': 'scooby@do.com',
+                             'password': 'scooby-snacks',
+                             'auto-attach': True,
+                             'service-level': 'self-support',
+                             'add-pool': ['pool1', 'pool2', 'pool3'],
+                             'enable-repo': ['repo1', 'repo2', 'repo3'],
+                             'disable-repo': ['repo4', 'repo5']
+                             }}
+
+    def test_already_registered(self):
+        '''
+        Emulates a system that is already registered. Ensure it gets
+        a non-ProcessExecution error from is_registered()
+        '''
+        good_message = 'System is already registered'
+        with mock.patch.object(cc_rh_subscription.SubscriptionManager,
+                               '_sub_man_cli') as mockobj:
+            self.log.info = mock.MagicMock(wraps=self.log.info)
+            self.handle(self.name, self.config, self.cloud_init,
+                        self.log, self.args)
+            self.assertEqual(mockobj.call_count, 1)
+            self.log.info.assert_called_with(good_message)
+
+    def test_simple_registration(self):
+        '''
+        Simple registration with username and password
+        '''
+        good_message = 'rh_subscription plugin completed successfully'
+        self.log.info = mock.MagicMock(wraps=self.log.info)
+        reg = "The system has been registered with ID:" \
+              " 12345678-abde-abcde-1234-1234567890abc"
+        self.SM._sub_man_cli = mock.MagicMock(
+            side_effect=[util.ProcessExecutionError, (reg, 'bar')])
+        self.handle(self.name, self.config, self.cloud_init,
+                    self.log, self.args)
+        self.SM._sub_man_cli.assert_called_with_once(['subscription-manager',
+                                                      'identity'])
+        self.SM._sub_man_cli.assert_called_with_once(
+            ['subscription-manager', 'register', '--username=scooby@do.com',
+             '--password=scooby-snacks'], logstring_val=True)
+
+        self.log.info.assert_called_with(good_message)
+        self.assertEqual(self.SM._sub_man_cli.call_count, 2)
+
+    def test_full_registration(self):
+        '''
+        Registration with auto-attach, service-level, adding pools,
+        and enabling and disabling yum repos
+        '''
+        pool_message = 'Pool pool2 is not available'
+        repo_message1 = 'Repo repo1 is already enabled'
+        repo_message2 = 'Enabled the following repos: repo2, repo3'
+        good_message = 'rh_subscription plugin completed successfully'
+        self.log.warn = mock.MagicMock(wraps=self.log.warn)
+        self.log.debug = mock.MagicMock(wraps=self.log.debug)
+        reg = "The system has been registered with ID:" \
+              " 12345678-abde-abcde-1234-1234567890abc"
+        self.SM._sub_man_cli = mock.MagicMock(
+            side_effect=[util.ProcessExecutionError, (reg, 'bar'),
+                         ('Service level set to: self-support', ''),
+                         ('pool1\npool3\n', ''), ('pool2\n', ''), ('', ''),
+                         ('Repo ID: repo1\nRepo ID: repo5\n', ''),
+                         ('Repo ID: repo2\nRepo ID: repo3\nRepo ID: '
+                          'repo4', ''),
+                         ('', '')])
+        self.handle(self.name, self.config_full, self.cloud_init,
+                    self.log, self.args)
+        self.log.warn.assert_any_call(pool_message)
+        self.log.debug.assert_any_call(repo_message1)
+        self.log.debug.assert_any_call(repo_message2)
+        self.log.info.assert_any_call(good_message)
+        self.SM._sub_man_cli.assert_called_with_once(['subscription-manager',
+                                                      'attach', '-pool=pool1',
+                                                      '--pool=pool33'])
+        self.assertEqual(self.SM._sub_man_cli.call_count, 9)
+
+
+class BadTests(unittest.TestCase):
+    def setUp(self):
+        super(BadTests, self).setUp()
+        self.name = "cc_rh_subscription"
+        self.cloud_init = None
+        self.log = logging.getLogger("bad_tests")
+        self.orig = self.log
+        self.args = []
+        self.handle = cc_rh_subscription.handle
+        self.SM = cc_rh_subscription.SubscriptionManager
+        self.reg = "The system has been registered with ID:" \
+                   " 12345678-abde-abcde-1234-1234567890abc"
+
+        self.config_no_password = {'rh_subscription':
+                                   {'username': 'scooby@do.com'
+                                    }}
+
+        self.config_no_key = {'rh_subscription':
+                              {'activation-key': '1234abcde',
+                               }}
+
+        self.config_service = {'rh_subscription':
+                               {'username': 'scooby@do.com',
+                                'password': 'scooby-snacks',
+                                'service-level': 'self-support'
+                                }}
+
+        self.config_badpool = {'rh_subscription':
+                               {'username': 'scooby@do.com',
+                                'password': 'scooby-snacks',
+                                'add-pool': 'not_a_list'
+                                }}
+        self.config_badrepo = {'rh_subscription':
+                               {'username': 'scooby@do.com',
+                                'password': 'scooby-snacks',
+                                'enable-repo': 'not_a_list'
+                                }}
+        self.config_badkey = {'rh_subscription':
+                              {'activation_key': 'abcdef1234',
+                               'org': '123',
+                               }}
+
+    def test_no_password(self):
+        '''
+        Attempt to register without the password key/value
+        '''
+        self.missing_info(self.config_no_password)
+
+    def test_no_org(self):
+        '''
+        Attempt to register without the org key/value
+        '''
+        self.missing_info(self.config_no_key)
+
+    def test_service_level_without_auto(self):
+        '''
+        Attempt to register using service-level without the auto-attach key
+        '''
+        good_message = 'The service-level key must be used in conjunction'\
+                       ' with the auto-attach key.  Please re-run with '\
+                       'auto-attach: True'
+
+        self.log.warn = mock.MagicMock(wraps=self.log.warn)
+        self.SM._sub_man_cli = mock.MagicMock(
+            side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+        self.handle(self.name, self.config_service, self.cloud_init,
+                    self.log, self.args)
+        self.log.warn.assert_any_call(good_message)
+        self.assertRaises(cc_rh_subscription.SubscriptionError)
+        self.assertEqual(self.SM._sub_man_cli.call_count, 1)
+
+    def test_pool_not_a_list(self):
+        '''
+        Register with pools that are not in the format of a list
+        '''
+        good_message = "Pools must in the format of a list"
+        self.log.warn = mock.MagicMock(wraps=self.log.warn)
+        self.SM._sub_man_cli = mock.MagicMock(
+            side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+        self.handle(self.name, self.config_badpool, self.cloud_init,
+                    self.log, self.args)
+        self.log.warn.assert_any_call(good_message)
+        self.assertRaises(cc_rh_subscription.SubscriptionError)
+        self.assertEqual(self.SM._sub_man_cli.call_count, 2)
+
+    def test_repo_not_a_list(self):
+        '''
+        Register with repos that are not in the format of a list
+        '''
+        good_message = "Repo IDs must in the format of a list."
+        self.log.warn = mock.MagicMock(wraps=self.log.warn)
+        self.SM._sub_man_cli = mock.MagicMock(
+            side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+        self.handle(self.name, self.config_badrepo, self.cloud_init,
+                    self.log, self.args)
+        self.log.warn.assert_any_call(good_message)
+        self.assertRaises(cc_rh_subscription.SubscriptionError)
+        self.assertEqual(self.SM._sub_man_cli.call_count, 2)
+
+    def test_bad_key_value(self):
+        '''
+        Attempt to register with a key that we don't know
+        '''
+        good_message = 'activation_key is not a valid key for rh_subscription.'\
+                       ' Valid keys are: org, activation-key, username, '\
+                       'password, disable-repo, enable-repo, add-pool,'\
+                       ' rhsm-baseurl, server-hostname, auto-attach, '\
+                       'service-level'
+        self.log.warn = mock.MagicMock(wraps=self.log.warn)
+        self.SM._sub_man_cli = mock.MagicMock(
+            side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
+        self.handle(self.name, self.config_badkey, self.cloud_init,
+                    self.log, self.args)
+        self.assertRaises(cc_rh_subscription.SubscriptionError)
+        self.log.warn.assert_any_call(good_message)
+        self.assertEqual(self.SM._sub_man_cli.call_count, 1)
+
+    def missing_info(self, config):
+        '''
+        Helper method for tests that have missing information
+        '''
+        good_message = "Unable to register system due to incomplete "\
+                       "information."
+        self.log.warn = mock.MagicMock(wraps=self.log.warn)
+        self.SM._sub_man_cli = mock.MagicMock(
+            side_effect=[util.ProcessExecutionError])
+        self.handle(self.name, config, self.cloud_init,
+                    self.log, self.args)
+        self.SM._sub_man_cli.assert_called_with(['subscription-manager',
+                                                 'identity'])
+        self.log.warn.assert_any_call(good_message)
+        self.assertEqual(self.SM._sub_man_cli.call_count, 1)
-- 
cgit v1.2.3


From 8af1802c9971ec1f2ebac23e9b42d5b42f43afae Mon Sep 17 00:00:00 2001
From: Ben Howard <ben.howard@canonical.com>
Date: Fri, 22 May 2015 10:28:17 -0600
Subject: AZURE: Redact on-disk user password in /var/lib/ovf-env.xml     The
 fabric provides the user password in plain text via the CDROM,     and
 cloud-init has previously written the ovf-env.xml in /var/lib/waagent     with
 the password in plain text. This change redacts the password.

---
 cloudinit/sources/DataSourceAzure.py          | 28 ++++++++--
 tests/unittests/test_datasource/test_azure.py | 73 ++++++++++++++++++++++++---
 2 files changed, 91 insertions(+), 10 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index f2388c63..d0a882ca 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -23,6 +23,8 @@ import fnmatch
 import os
 import os.path
 import time
+import xml.etree.ElementTree as ET
+
 from xml.dom import minidom
 
 from cloudinit import log as logging
@@ -68,6 +70,10 @@ BUILTIN_CLOUD_CONFIG = {
 DS_CFG_PATH = ['datasource', DS_NAME]
 DEF_EPHEMERAL_LABEL = 'Temporary Storage'
 
+# The redacted password fails to meet password complexity requirements
+# so we can safely use this to mask/redact the password in the ovf-env.xml
+DEF_PASSWD_REDACTION = 'REDACTED'
+
 
 def get_hostname(hostname_command='hostname'):
     return util.subp(hostname_command, capture=True)[0].strip()
@@ -414,14 +420,30 @@ def wait_for_files(flist, maxwait=60, naplen=.5):
 
 
 def write_files(datadir, files, dirmode=None):
+
+    def _redact_password(cnt, fname):
+        """Azure provides the UserPassword in plain text. So we redact it"""
+        try:
+            root = ET.fromstring(cnt)
+            for elem in root.iter():
+                if ('UserPassword' in elem.tag and
+                   elem.text != DEF_PASSWD_REDACTION):
+                    elem.text = DEF_PASSWD_REDACTION
+            return ET.tostring(root)
+        except Exception as e:
+            LOG.critical("failed to redact userpassword in {}".format(fname))
+            return cnt
+
     if not datadir:
         return
     if not files:
         files = {}
     util.ensure_dir(datadir, dirmode)
     for (name, content) in files.items():
-        util.write_file(filename=os.path.join(datadir, name),
-                        content=content, mode=0o600)
+        fname = os.path.join(datadir, name)
+        if 'ovf-env.xml' in name:
+            content = _redact_password(content, fname)
+        util.write_file(filename=fname, content=content, mode=0o600)
 
 
 def invoke_agent(cmd):
@@ -576,7 +598,7 @@ def read_azure_ovf(contents):
     defuser = {}
     if username:
         defuser['name'] = username
-    if password:
+    if password and DEF_PASSWD_REDACTION != password:
         defuser['passwd'] = encrypt_pass(password)
         defuser['lock_passwd'] = False
 
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 4c4b8eec..33b971f6 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -18,6 +18,7 @@ import stat
 import yaml
 import shutil
 import tempfile
+import xml.etree.ElementTree as ET
 
 
 def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
@@ -144,6 +145,39 @@ class TestAzureDataSource(TestCase):
 
         return dsrc
 
+    def xml_equals(self, oxml, nxml):
+        """Compare two sets of XML to make sure they are equal"""
+
+        def create_tag_index(xml):
+            et = ET.fromstring(xml)
+            ret = {}
+            for x in et.iter():
+                ret[x.tag] = x
+            return ret
+
+        def tags_exists(x, y):
+            for tag in x.keys():
+                self.assertIn(tag, y)
+            for tag in y.keys():
+                self.assertIn(tag, x)
+
+        def tags_equal(x, y):
+            for x_tag, x_val in x.items():
+                y_val = y.get(x_val.tag)
+                self.assertEquals(x_val.text, y_val.text)
+
+        old_cnt = create_tag_index(oxml)
+        new_cnt = create_tag_index(nxml)
+        tags_exists(old_cnt, new_cnt)
+        tags_equal(old_cnt, new_cnt)
+
+    def xml_notequals(self, oxml, nxml):
+        try:
+            self.xml_equals(oxml, nxml)
+        except AssertionError as e:
+            return
+        raise AssertionError("XML is the same")
+
     def test_basic_seed_dir(self):
         odata = {'HostName': "myhost", 'UserName': "myuser"}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata),
@@ -322,6 +356,31 @@ class TestAzureDataSource(TestCase):
 
         self.assertEqual(userdata.encode('us-ascii'), dsrc.userdata_raw)
 
+    def test_password_redacted_in_ovf(self):
+        odata = {'HostName': "myhost", 'UserName': "myuser",
+                 'UserPassword': "mypass"}
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+
+        self.assertTrue(ret)
+        ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
+
+        # The XML should not be same since the user password is redacted
+        on_disk_ovf = load_file(ovf_env_path)
+        self.xml_notequals(data['ovfcontent'], on_disk_ovf)
+
+        # Make sure that the redacted password on disk is not used by CI
+        self.assertNotEquals(dsrc.cfg.get('password'),
+                             DataSourceAzure.DEF_PASSWD_REDACTION)
+
+        # Make sure that the password was really encrypted
+        et = ET.fromstring(on_disk_ovf)
+        for elem in et.iter():
+            if 'UserPassword' in elem.tag:
+                self.assertEquals(DataSourceAzure.DEF_PASSWD_REDACTION,
+                                  elem.text)
+
     def test_ovf_env_arrives_in_waagent_dir(self):
         xml = construct_valid_ovf_env(data={}, userdata="FOODATA")
         dsrc = self._get_ds({'ovfcontent': xml})
@@ -331,7 +390,7 @@ class TestAzureDataSource(TestCase):
         # we expect that the ovf-env.xml file is copied there.
         ovf_env_path = os.path.join(self.waagent_d, 'ovf-env.xml')
         self.assertTrue(os.path.exists(ovf_env_path))
-        self.assertEqual(xml, load_file(ovf_env_path))
+        self.xml_equals(xml, load_file(ovf_env_path))
 
     def test_ovf_can_include_unicode(self):
         xml = construct_valid_ovf_env(data={})
@@ -380,12 +439,12 @@ class TestAzureDataSource(TestCase):
         self.assertEqual(dsrc.userdata_raw, b"NEW_USERDATA")
         self.assertTrue(os.path.exists(
             os.path.join(self.waagent_d, 'otherfile')))
-        self.assertFalse(
-            os.path.exists(os.path.join(self.waagent_d, 'SharedConfig.xml')))
-        self.assertTrue(
-            os.path.exists(os.path.join(self.waagent_d, 'ovf-env.xml')))
-        self.assertEqual(new_ovfenv,
-            load_file(os.path.join(self.waagent_d, 'ovf-env.xml')))
+        self.assertFalse(os.path.exists(
+                        os.path.join(self.waagent_d, 'SharedConfig.xml')))
+        self.assertTrue(os.path.exists(
+                        os.path.join(self.waagent_d, 'ovf-env.xml')))
+        new_xml = load_file(os.path.join(self.waagent_d, 'ovf-env.xml'))
+        self.xml_equals(new_ovfenv, new_xml)
 
     def test_exception_fetching_fabric_data_doesnt_propagate(self):
         ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
-- 
cgit v1.2.3


From d9470a429935d4a5e12a5a3d1f57867362f92c57 Mon Sep 17 00:00:00 2001
From: Brent Baude <bbaude@redhat.com>
Date: Wed, 27 May 2015 13:01:35 -0500
Subject: Updated files with upstream review comments thanks to Dan and Scott

---
 cloudinit/config/cc_rh_subscription.py  | 108 ++++++++++----------
 tests/unittests/test_rh_subscription.py | 169 ++++++++++++++------------------
 2 files changed, 128 insertions(+), 149 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 00a88456..db3d5525 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -1,6 +1,6 @@
 # vi: ts=4 expandtab
 #
-#    Copyright (C) Red Hat, Inc.
+#    Copyright (C) 2015 Red Hat, Inc.
 #
 #    Author: Brent Baude <bbaude@redhat.com>
 #
@@ -16,7 +16,6 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import itertools
 from cloudinit import util
 
 
@@ -51,10 +50,10 @@ def handle(_name, cfg, _cloud, log, _args):
                     sm.log.debug("Completed auto-attach")
 
             if sm.pools is not None:
-                if type(sm.pools) is not list:
+                if not isinstance(sm.pools, (list)):
                     pool_fail = "Pools must in the format of a list"
                     raise SubscriptionError(pool_fail)
-                                            
+
                 return_stat = sm.addPool(sm.pools)
                 if not return_stat:
                     raise SubscriptionError("Unable to attach pools {0}"
@@ -63,12 +62,12 @@ def handle(_name, cfg, _cloud, log, _args):
                 return_stat = sm.update_repos(sm.enable_repo, sm.disable_repo)
                 if not return_stat:
                     raise SubscriptionError("Unable to add or remove repos")
-            sm.log.info("rh_subscription plugin completed successfully")
+            sm.log_sucess("rh_subscription plugin completed successfully")
         except SubscriptionError as e:
-            sm.log.warn(str(e))
-            sm.log.warn("rh_subscription plugin did not complete successfully")
+            sm.log_warn(str(e))
+            sm.log_warn("rh_subscription plugin did not complete successfully")
     else:
-        sm.log.info("System is already registered")
+        sm.log_sucess("System is already registered")
 
 
 class SubscriptionError(Exception):
@@ -76,6 +75,11 @@ class SubscriptionError(Exception):
 
 
 class SubscriptionManager(object):
+    valid_rh_keys = ['org', 'activation-key', 'username', 'password',
+                     'disable-repo', 'enable-repo', 'add-pool',
+                     'rhsm-baseurl', 'server-hostname',
+                     'auto-attach', 'service-level']
+
     def __init__(self, cfg):
         self.cfg = cfg
         self.rhel_cfg = self.cfg.get('rh_subscription', {})
@@ -91,12 +95,16 @@ class SubscriptionManager(object):
         self.disable_repo = self.rhel_cfg.get('disable-repo')
         self.servicelevel = self.rhel_cfg.get('service-level')
         self.subman = ['subscription-manager']
-        self.valid_rh_keys = ['org', 'activation-key', 'username', 'password',
-                              'disable-repo', 'enable-repo', 'add-pool',
-                              'rhsm-baseurl', 'server-hostname',
-                              'auto-attach', 'service-level']
         self.is_registered = self._is_registered()
 
+    def log_sucess(self, msg):
+        '''Simple wrapper for logging info messages. Useful for unittests'''
+        self.log.info(msg)
+
+    def log_warn(self, msg):
+        '''Simple wrapper for logging warning messages. Useful for unittests'''
+        self.log.warn(msg)
+
     def _verify_keys(self):
         '''
         Checks that the keys in the rh_subscription dict from the user-data
@@ -112,14 +120,15 @@ class SubscriptionManager(object):
 
         # Check for bad auto-attach value
         if (self.auto_attach is not None) and \
-                (str(self.auto_attach).upper() not in ['TRUE', 'FALSE']):
+                not (util.is_true(self.auto_attach) or
+                     util.is_false(self.auto_attach)):
             not_bool = "The key auto-attach must be a value of "\
                        "either True or False"
             return False, not_bool
 
         if (self.servicelevel is not None) and \
-            ((not self.auto_attach) or
-                (str(self.auto_attach).upper() == "FALSE")):
+                ((not self.auto_attach)
+                 or (util.is_false(str(self.auto_attach)))):
 
             no_auto = "The service-level key must be used in conjunction with "\
                       "the auto-attach key.  Please re-run with auto-attach: "\
@@ -132,7 +141,7 @@ class SubscriptionManager(object):
         Checks if the system is already registered and returns
         True if so, else False
         '''
-        cmd = list(itertools.chain(self.subman, ['identity']))
+        cmd = ['identity']
 
         try:
             self._sub_man_cli(cmd)
@@ -147,9 +156,8 @@ class SubscriptionManager(object):
         and runs subscription-manager.  Breaking this to a
         separate function for later use in mocking and unittests
         '''
-        return_out, return_err = util.subp(cmd, logstring=logstring_val)
-
-        return return_out, return_err
+        cmd = self.subman + cmd
+        return util.subp(cmd, logstring=logstring_val)
 
     def rhn_register(self):
         '''
@@ -159,10 +167,8 @@ class SubscriptionManager(object):
 
         if (self.activation_key is not None) and (self.org is not None):
             # register by activation key
-            cmd = list(itertools.chain(self.subman, ['register',
-                                       '--activationkey={0}'.
-                       format(self.activation_key),
-                       '--org={0}'.format(self.org)]))
+            cmd = ['register', '--activationkey={0}'.
+                   format(self.activation_key), '--org={0}'.format(self.org)]
 
             # If the baseurl and/or server url are passed in, we register
             # with them.
@@ -178,15 +184,14 @@ class SubscriptionManager(object):
                                                            logstring_val=True)
             except util.ProcessExecutionError as e:
                 if e.stdout == "":
-                    self.log.warn("Registration failed due "
+                    self.log_warn("Registration failed due "
                                   "to: {0}".format(e.stderr))
                 return False
 
         elif (self.userid is not None) and (self.password is not None):
             # register by username and password
-            cmd = list(itertools.chain(self.subman, ['register',
-                       '--username={0}'.format(self.userid),
-                       '--password={0}'.format(self.password)]))
+            cmd = ['register', '--username={0}'.format(self.userid),
+                   '--password={0}'.format(self.password)]
 
             # If the baseurl and/or server url are passed in, we register
             # with them.
@@ -203,14 +208,14 @@ class SubscriptionManager(object):
                                                            logstring_val=True)
             except util.ProcessExecutionError as e:
                 if e.stdout == "":
-                    self.log.warn("Registration failed due "
+                    self.log_warn("Registration failed due "
                                   "to: {0}".format(e.stderr))
                 return False
 
         else:
-            self.log.warn("Unable to register system due to incomplete "
+            self.log_warn("Unable to register system due to incomplete "
                           "information.")
-            self.log.warn("Use either activationkey and org *or* userid "
+            self.log_warn("Use either activationkey and org *or* userid "
                           "and password")
             return False
 
@@ -219,9 +224,8 @@ class SubscriptionManager(object):
         return True
 
     def _set_service_level(self):
-        cmd = list(itertools.chain(self.subman,
-                                   ['attach', '--auto', '--servicelevel={0}'
-                                    .format(self.servicelevel)]))
+        cmd = ['attach', '--auto', '--servicelevel={0}'
+               .format(self.servicelevel)]
 
         try:
             return_out, return_err = self._sub_man_cli(cmd)
@@ -229,9 +233,9 @@ class SubscriptionManager(object):
             if e.stdout.rstrip() != '':
                 for line in e.stdout.split("\n"):
                     if line is not '':
-                        self.log.warn(line)
+                        self.log_warn(line)
             else:
-                self.log.warn("Setting the service level failed with: "
+                self.log_warn("Setting the service level failed with: "
                               "{0}".format(e.stderr.strip()))
             return False
         for line in return_out.split("\n"):
@@ -240,11 +244,11 @@ class SubscriptionManager(object):
         return True
 
     def _set_auto_attach(self):
-        cmd = list(itertools.chain(self.subman, ['attach', '--auto']))
+        cmd = ['attach', '--auto']
         try:
             return_out, return_err = self._sub_man_cli(cmd)
         except util.ProcessExecutionError:
-            self.log.warn("Auto-attach failed with: "
+            self.log_warn("Auto-attach failed with: "
                           "{0}]".format(return_err.strip()))
             return False
         for line in return_out.split("\n"):
@@ -261,14 +265,12 @@ class SubscriptionManager(object):
         consumed = []
 
         # Get all available pools
-        cmd = list(itertools.chain(self.subman, ['list', '--available',
-                                                 '--pool-only']))
+        cmd = ['list', '--available', '--pool-only']
         results, errors = self._sub_man_cli(cmd)
         available = (results.rstrip()).split("\n")
 
         # Get all consumed pools
-        cmd = list(itertools.chain(self.subman, ['list', '--consumed',
-                                                 '--pool-only']))
+        cmd = ['list', '--consumed', '--pool-only']
         results, errors = self._sub_man_cli(cmd)
         consumed = (results.rstrip()).split("\n")
 
@@ -280,14 +282,14 @@ class SubscriptionManager(object):
         them in list form.
         '''
 
-        cmd = list(itertools.chain(self.subman, ['repos', '--list-enabled']))
+        cmd = ['repos', '--list-enabled']
         return_out, return_err = self._sub_man_cli(cmd)
         active_repos = []
         for repo in return_out.split("\n"):
             if "Repo ID:" in repo:
                 active_repos.append((repo.split(':')[1]).strip())
 
-        cmd = list(itertools.chain(self.subman, ['repos', '--list-disabled']))
+        cmd = ['repos', '--list-disabled']
         return_out, return_err = self._sub_man_cli(cmd)
 
         inactive_repos = []
@@ -309,12 +311,12 @@ class SubscriptionManager(object):
 
         pool_available, pool_consumed = self._getPools()
         pool_list = []
-        cmd = list(itertools.chain(self.subman, ['attach']))
+        cmd = ['attach']
         for pool in pools:
             if (pool not in pool_consumed) and (pool in pool_available):
                 pool_list.append('--pool={0}'.format(pool))
             else:
-                self.log.warn("Pool {0} is not available".format(pool))
+                self.log_warn("Pool {0} is not available".format(pool))
         if len(pool_list) > 0:
             cmd.extend(pool_list)
             try:
@@ -324,7 +326,7 @@ class SubscriptionManager(object):
                                .replace('--pool=', ''))
                 return True
             except util.ProcessExecutionError as e:
-                self.log.warn("Unable to attach pool {0} "
+                self.log_warn("Unable to attach pool {0} "
                               "due to {1}".format(pool, e))
                 return False
 
@@ -335,12 +337,12 @@ class SubscriptionManager(object):
         executes the action to disable or enable
         '''
 
-        if (erepos is not None) and (type(erepos) is not list):
-            self.log.warn("Repo IDs must in the format of a list.")
+        if (erepos is not None) and (not isinstance(erepos, (list))):
+            self.log_warn("Repo IDs must in the format of a list.")
             return False
 
-        if (drepos is not None) and (type(drepos) is not list):
-            self.log.warn("Repo IDs must in the format of a list.")
+        if (drepos is not None) and (not isinstance(drepos, (list))):
+            self.log_warn("Repo IDs must in the format of a list.")
             return False
 
         # Bail if both lists are not populated
@@ -374,14 +376,14 @@ class SubscriptionManager(object):
                 if fail in active_repos:
                     self.log.debug("Repo {0} is already enabled".format(fail))
                 else:
-                    self.log.warn("Repo {0} does not appear to "
+                    self.log_warn("Repo {0} does not appear to "
                                   "exist".format(fail))
         if len(disable_list_fail) > 0:
             for fail in disable_list_fail:
                 self.log.debug("Repo {0} not disabled "
                                "because it is not enabled".format(fail))
 
-        cmd = list(itertools.chain(self.subman, ['repos']))
+        cmd = ['repos']
         if enable_list > 0:
             cmd.extend(enable_list)
         if disable_list > 0:
@@ -390,7 +392,7 @@ class SubscriptionManager(object):
         try:
             self._sub_man_cli(cmd)
         except util.ProcessExecutionError as e:
-            self.log.warn("Unable to alter repos due to {0}".format(e))
+            self.log_warn("Unable to alter repos due to {0}".format(e))
             return False
 
         if enable_list > 0:
diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py
index ba9181ec..2f813f41 100644
--- a/tests/unittests/test_rh_subscription.py
+++ b/tests/unittests/test_rh_subscription.py
@@ -34,34 +34,33 @@ class GoodTests(unittest.TestCase):
         Emulates a system that is already registered. Ensure it gets
         a non-ProcessExecution error from is_registered()
         '''
-        good_message = 'System is already registered'
         with mock.patch.object(cc_rh_subscription.SubscriptionManager,
                                '_sub_man_cli') as mockobj:
-            self.log.info = mock.MagicMock(wraps=self.log.info)
+            self.SM.log_sucess = mock.MagicMock()
             self.handle(self.name, self.config, self.cloud_init,
                         self.log, self.args)
+            self.assertEqual(self.SM.log_sucess.call_count, 1)
             self.assertEqual(mockobj.call_count, 1)
-            self.log.info.assert_called_with(good_message)
 
     def test_simple_registration(self):
         '''
         Simple registration with username and password
         '''
-        good_message = 'rh_subscription plugin completed successfully'
-        self.log.info = mock.MagicMock(wraps=self.log.info)
+        self.SM.log_sucess = mock.MagicMock()
         reg = "The system has been registered with ID:" \
               " 12345678-abde-abcde-1234-1234567890abc"
         self.SM._sub_man_cli = mock.MagicMock(
             side_effect=[util.ProcessExecutionError, (reg, 'bar')])
         self.handle(self.name, self.config, self.cloud_init,
                     self.log, self.args)
-        self.SM._sub_man_cli.assert_called_with_once(['subscription-manager',
-                                                      'identity'])
-        self.SM._sub_man_cli.assert_called_with_once(
-            ['subscription-manager', 'register', '--username=scooby@do.com',
-             '--password=scooby-snacks'], logstring_val=True)
-
-        self.log.info.assert_called_with(good_message)
+        self.assertIn(mock.call(['identity']),
+                      self.SM._sub_man_cli.call_args_list)
+        self.assertIn(mock.call(['register', '--username=scooby@do.com',
+                                 '--password=scooby-snacks'],
+                                logstring_val=True),
+                      self.SM._sub_man_cli.call_args_list)
+
+        self.assertEqual(self.SM.log_sucess.call_count, 1)
         self.assertEqual(self.SM._sub_man_cli.call_count, 2)
 
     def test_full_registration(self):
@@ -69,12 +68,12 @@ class GoodTests(unittest.TestCase):
         Registration with auto-attach, service-level, adding pools,
         and enabling and disabling yum repos
         '''
-        pool_message = 'Pool pool2 is not available'
-        repo_message1 = 'Repo repo1 is already enabled'
-        repo_message2 = 'Enabled the following repos: repo2, repo3'
-        good_message = 'rh_subscription plugin completed successfully'
-        self.log.warn = mock.MagicMock(wraps=self.log.warn)
-        self.log.debug = mock.MagicMock(wraps=self.log.debug)
+        call_lists = []
+        call_lists.append(['attach', '--pool=pool1', '--pool=pool3'])
+        call_lists.append(['repos', '--enable=repo2', '--enable=repo3',
+                           '--disable=repo5'])
+        call_lists.append(['attach', '--auto', '--servicelevel=self-support'])
+        self.SM.log_sucess = mock.MagicMock()
         reg = "The system has been registered with ID:" \
               " 12345678-abde-abcde-1234-1234567890abc"
         self.SM._sub_man_cli = mock.MagicMock(
@@ -87,145 +86,123 @@ class GoodTests(unittest.TestCase):
                          ('', '')])
         self.handle(self.name, self.config_full, self.cloud_init,
                     self.log, self.args)
-        self.log.warn.assert_any_call(pool_message)
-        self.log.debug.assert_any_call(repo_message1)
-        self.log.debug.assert_any_call(repo_message2)
-        self.log.info.assert_any_call(good_message)
-        self.SM._sub_man_cli.assert_called_with_once(['subscription-manager',
-                                                      'attach', '-pool=pool1',
-                                                      '--pool=pool33'])
+        for call in call_lists:
+            self.assertIn(mock.call(call), self.SM._sub_man_cli.call_args_list)
+        self.assertEqual(self.SM.log_sucess.call_count, 1)
         self.assertEqual(self.SM._sub_man_cli.call_count, 9)
 
 
-class BadTests(unittest.TestCase):
+class TestBadInput(unittest.TestCase):
+    name = "cc_rh_subscription"
+    cloud_init = None
+    log = logging.getLogger("bad_tests")
+    args = []
+    SM = cc_rh_subscription.SubscriptionManager
+    reg = "The system has been registered with ID:" \
+          " 12345678-abde-abcde-1234-1234567890abc"
+
+    config_no_password = {'rh_subscription':
+                          {'username': 'scooby@do.com'
+                           }}
+
+    config_no_key = {'rh_subscription':
+                     {'activation-key': '1234abcde',
+                      }}
+
+    config_service = {'rh_subscription':
+                      {'username': 'scooby@do.com',
+                       'password': 'scooby-snacks',
+                       'service-level': 'self-support'
+                       }}
+
+    config_badpool = {'rh_subscription':
+                      {'username': 'scooby@do.com',
+                       'password': 'scooby-snacks',
+                       'add-pool': 'not_a_list'
+                       }}
+    config_badrepo = {'rh_subscription':
+                      {'username': 'scooby@do.com',
+                       'password': 'scooby-snacks',
+                       'enable-repo': 'not_a_list'
+                       }}
+    config_badkey = {'rh_subscription':
+                     {'activation_key': 'abcdef1234',
+                      'org': '123',
+                      }}
+
     def setUp(self):
-        super(BadTests, self).setUp()
-        self.name = "cc_rh_subscription"
-        self.cloud_init = None
-        self.log = logging.getLogger("bad_tests")
-        self.orig = self.log
-        self.args = []
+        super(TestBadInput, self).setUp()
         self.handle = cc_rh_subscription.handle
-        self.SM = cc_rh_subscription.SubscriptionManager
-        self.reg = "The system has been registered with ID:" \
-                   " 12345678-abde-abcde-1234-1234567890abc"
-
-        self.config_no_password = {'rh_subscription':
-                                   {'username': 'scooby@do.com'
-                                    }}
-
-        self.config_no_key = {'rh_subscription':
-                              {'activation-key': '1234abcde',
-                               }}
-
-        self.config_service = {'rh_subscription':
-                               {'username': 'scooby@do.com',
-                                'password': 'scooby-snacks',
-                                'service-level': 'self-support'
-                                }}
-
-        self.config_badpool = {'rh_subscription':
-                               {'username': 'scooby@do.com',
-                                'password': 'scooby-snacks',
-                                'add-pool': 'not_a_list'
-                                }}
-        self.config_badrepo = {'rh_subscription':
-                               {'username': 'scooby@do.com',
-                                'password': 'scooby-snacks',
-                                'enable-repo': 'not_a_list'
-                                }}
-        self.config_badkey = {'rh_subscription':
-                              {'activation_key': 'abcdef1234',
-                               'org': '123',
-                               }}
 
     def test_no_password(self):
         '''
         Attempt to register without the password key/value
         '''
-        self.missing_info(self.config_no_password)
+        self.input_is_missing_data(self.config_no_password)
 
     def test_no_org(self):
         '''
         Attempt to register without the org key/value
         '''
-        self.missing_info(self.config_no_key)
+        self.input_is_missing_data(self.config_no_key)
 
     def test_service_level_without_auto(self):
         '''
         Attempt to register using service-level without the auto-attach key
         '''
-        good_message = 'The service-level key must be used in conjunction'\
-                       ' with the auto-attach key.  Please re-run with '\
-                       'auto-attach: True'
-
-        self.log.warn = mock.MagicMock(wraps=self.log.warn)
+        self.SM.log_warn = mock.MagicMock()
         self.SM._sub_man_cli = mock.MagicMock(
             side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
         self.handle(self.name, self.config_service, self.cloud_init,
                     self.log, self.args)
-        self.log.warn.assert_any_call(good_message)
-        self.assertRaises(cc_rh_subscription.SubscriptionError)
         self.assertEqual(self.SM._sub_man_cli.call_count, 1)
+        self.assertEqual(self.SM.log_warn.call_count, 2)
 
     def test_pool_not_a_list(self):
         '''
         Register with pools that are not in the format of a list
         '''
-        good_message = "Pools must in the format of a list"
-        self.log.warn = mock.MagicMock(wraps=self.log.warn)
+        self.SM.log_warn = mock.MagicMock()
         self.SM._sub_man_cli = mock.MagicMock(
             side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
         self.handle(self.name, self.config_badpool, self.cloud_init,
                     self.log, self.args)
-        self.log.warn.assert_any_call(good_message)
-        self.assertRaises(cc_rh_subscription.SubscriptionError)
         self.assertEqual(self.SM._sub_man_cli.call_count, 2)
+        self.assertEqual(self.SM.log_warn.call_count, 2)
 
     def test_repo_not_a_list(self):
         '''
         Register with repos that are not in the format of a list
         '''
-        good_message = "Repo IDs must in the format of a list."
-        self.log.warn = mock.MagicMock(wraps=self.log.warn)
+        self.SM.log_warn = mock.MagicMock()
         self.SM._sub_man_cli = mock.MagicMock(
             side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
         self.handle(self.name, self.config_badrepo, self.cloud_init,
                     self.log, self.args)
-        self.log.warn.assert_any_call(good_message)
-        self.assertRaises(cc_rh_subscription.SubscriptionError)
+        self.assertEqual(self.SM.log_warn.call_count, 3)
         self.assertEqual(self.SM._sub_man_cli.call_count, 2)
 
     def test_bad_key_value(self):
         '''
         Attempt to register with a key that we don't know
         '''
-        good_message = 'activation_key is not a valid key for rh_subscription.'\
-                       ' Valid keys are: org, activation-key, username, '\
-                       'password, disable-repo, enable-repo, add-pool,'\
-                       ' rhsm-baseurl, server-hostname, auto-attach, '\
-                       'service-level'
-        self.log.warn = mock.MagicMock(wraps=self.log.warn)
+        self.SM.log_warn = mock.MagicMock()
         self.SM._sub_man_cli = mock.MagicMock(
             side_effect=[util.ProcessExecutionError, (self.reg, 'bar')])
         self.handle(self.name, self.config_badkey, self.cloud_init,
                     self.log, self.args)
-        self.assertRaises(cc_rh_subscription.SubscriptionError)
-        self.log.warn.assert_any_call(good_message)
+        self.assertEqual(self.SM.log_warn.call_count, 2)
         self.assertEqual(self.SM._sub_man_cli.call_count, 1)
 
-    def missing_info(self, config):
+    def input_is_missing_data(self, config):
         '''
         Helper def for tests that having missing information
         '''
-        good_message = "Unable to register system due to incomplete "\
-                       "information."
-        self.log.warn = mock.MagicMock(wraps=self.log.warn)
+        self.SM.log_warn = mock.MagicMock()
         self.SM._sub_man_cli = mock.MagicMock(
             side_effect=[util.ProcessExecutionError])
         self.handle(self.name, config, self.cloud_init,
                     self.log, self.args)
-        self.SM._sub_man_cli.assert_called_with(['subscription-manager',
-                                                 'identity'])
-        self.log.warn.assert_any_call(good_message)
+        self.SM._sub_man_cli.assert_called_with(['identity'])
+        self.assertEqual(self.SM.log_warn.call_count, 4)
         self.assertEqual(self.SM._sub_man_cli.call_count, 1)
-- 
cgit v1.2.3


From 3aa0fcc5983416d743fac6af1d40ca791feb23af Mon Sep 17 00:00:00 2001
From: Brent Baude <bbaude@redhat.com>
Date: Thu, 28 May 2015 09:02:11 -0500
Subject: Tightening up an error message and isinstance usage based on feedback
 from Dan

---
 cloudinit/config/cc_rh_subscription.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index db3d5525..e57e8a07 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -50,7 +50,7 @@ def handle(_name, cfg, _cloud, log, _args):
                     sm.log.debug("Completed auto-attach")
 
             if sm.pools is not None:
-                if not isinstance(sm.pools, (list)):
+                if not isinstance(sm.pools, list):
                     pool_fail = "Pools must in the format of a list"
                     raise SubscriptionError(pool_fail)
 
@@ -122,8 +122,8 @@ class SubscriptionManager(object):
         if (self.auto_attach is not None) and \
                 not (util.is_true(self.auto_attach) or
                      util.is_false(self.auto_attach)):
-            not_bool = "The key auto-attach must be a value of "\
-                       "either True or False"
+            not_bool = "The key auto-attach must be a boolean value "\
+                       "(True/False "
             return False, not_bool
 
         if (self.servicelevel is not None) and \
@@ -337,11 +337,11 @@ class SubscriptionManager(object):
         executes the action to disable or enable
         '''
 
-        if (erepos is not None) and (not isinstance(erepos, (list))):
+        if (erepos is not None) and (not isinstance(erepos, list)):
             self.log_warn("Repo IDs must in the format of a list.")
             return False
 
-        if (drepos is not None) and (not isinstance(drepos, (list))):
+        if (drepos is not None) and (not isinstance(drepos, list)):
             self.log_warn("Repo IDs must in the format of a list.")
             return False
 
-- 
cgit v1.2.3


From 3c01b8e48400697362f190984ab9c96dee27a369 Mon Sep 17 00:00:00 2001
From: Brent Baude <bbaude@redhat.com>
Date: Fri, 29 May 2015 09:18:49 -0500
Subject: Corrected spelling error on variable name

---
 cloudinit/config/cc_rh_subscription.py  |  6 +++---
 tests/unittests/test_rh_subscription.py | 12 ++++++------
 2 files changed, 9 insertions(+), 9 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index e57e8a07..cabebca4 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -62,12 +62,12 @@ def handle(_name, cfg, _cloud, log, _args):
                 return_stat = sm.update_repos(sm.enable_repo, sm.disable_repo)
                 if not return_stat:
                     raise SubscriptionError("Unable to add or remove repos")
-            sm.log_sucess("rh_subscription plugin completed successfully")
+            sm.log_success("rh_subscription plugin completed successfully")
         except SubscriptionError as e:
             sm.log_warn(str(e))
             sm.log_warn("rh_subscription plugin did not complete successfully")
     else:
-        sm.log_sucess("System is already registered")
+        sm.log_success("System is already registered")
 
 
 class SubscriptionError(Exception):
@@ -97,7 +97,7 @@ class SubscriptionManager(object):
         self.subman = ['subscription-manager']
         self.is_registered = self._is_registered()
 
-    def log_sucess(self, msg):
+    def log_success(self, msg):
         '''Simple wrapper for logging info messages. Useful for unittests'''
         self.log.info(msg)
 
diff --git a/tests/unittests/test_rh_subscription.py b/tests/unittests/test_rh_subscription.py
index 2f813f41..38d5763a 100644
--- a/tests/unittests/test_rh_subscription.py
+++ b/tests/unittests/test_rh_subscription.py
@@ -36,17 +36,17 @@ class GoodTests(unittest.TestCase):
         '''
         with mock.patch.object(cc_rh_subscription.SubscriptionManager,
                                '_sub_man_cli') as mockobj:
-            self.SM.log_sucess = mock.MagicMock()
+            self.SM.log_success = mock.MagicMock()
             self.handle(self.name, self.config, self.cloud_init,
                         self.log, self.args)
-            self.assertEqual(self.SM.log_sucess.call_count, 1)
+            self.assertEqual(self.SM.log_success.call_count, 1)
             self.assertEqual(mockobj.call_count, 1)
 
     def test_simple_registration(self):
         '''
         Simple registration with username and password
         '''
-        self.SM.log_sucess = mock.MagicMock()
+        self.SM.log_success = mock.MagicMock()
         reg = "The system has been registered with ID:" \
               " 12345678-abde-abcde-1234-1234567890abc"
         self.SM._sub_man_cli = mock.MagicMock(
@@ -60,7 +60,7 @@ class GoodTests(unittest.TestCase):
                                 logstring_val=True),
                       self.SM._sub_man_cli.call_args_list)
 
-        self.assertEqual(self.SM.log_sucess.call_count, 1)
+        self.assertEqual(self.SM.log_success.call_count, 1)
         self.assertEqual(self.SM._sub_man_cli.call_count, 2)
 
     def test_full_registration(self):
@@ -73,7 +73,7 @@ class GoodTests(unittest.TestCase):
         call_lists.append(['repos', '--enable=repo2', '--enable=repo3',
                            '--disable=repo5'])
         call_lists.append(['attach', '--auto', '--servicelevel=self-support'])
-        self.SM.log_sucess = mock.MagicMock()
+        self.SM.log_success = mock.MagicMock()
         reg = "The system has been registered with ID:" \
               " 12345678-abde-abcde-1234-1234567890abc"
         self.SM._sub_man_cli = mock.MagicMock(
@@ -88,7 +88,7 @@ class GoodTests(unittest.TestCase):
                     self.log, self.args)
         for call in call_lists:
             self.assertIn(mock.call(call), self.SM._sub_man_cli.call_args_list)
-        self.assertEqual(self.SM.log_sucess.call_count, 1)
+        self.assertEqual(self.SM.log_success.call_count, 1)
         self.assertEqual(self.SM._sub_man_cli.call_count, 9)
 
 
-- 
cgit v1.2.3


From 04a5edaa33d6a7e64f95c04eceaa82eec12cb237 Mon Sep 17 00:00:00 2001
From: Lars Kellogg-Stedman <lars@redhat.com>
Date: Tue, 2 Jun 2015 16:27:57 -0400
Subject: check for systemd using sd_booted() semantics

The existing cloud-init code determines if systemd is in use by looking at the
distribution name and version. This is prone to error because:

- RHEL derivatives other than CentOS (e.g., Scientific Linux) will fail this test, and
- Distributions that are not derived from RHEL also use systemd

This patch makes cloud-init use the same logic that is used in systemd's
sd_booted() method
(http://www.freedesktop.org/software/systemd/man/sd_booted.html)

LP: #1461201
---
 cloudinit/distros/__init__.py | 8 ++++++++
 cloudinit/distros/rhel.py     | 8 --------
 2 files changed, 8 insertions(+), 8 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index e0cce670..8a947867 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -27,6 +27,7 @@ from six import StringIO
 import abc
 import os
 import re
+import stat
 
 from cloudinit import importer
 from cloudinit import log as logging
@@ -89,6 +90,13 @@ class Distro(object):
         self._write_hostname(writeable_hostname, self.hostname_conf_fn)
         self._apply_hostname(writeable_hostname)
 
+    def uses_systemd(self):
+        try:
+            res = os.lstat('/run/systemd/system')
+            return stat.S_ISDIR(res.st_mode)
+        except:
+            return False
+
     @abc.abstractmethod
     def package_command(self, cmd, args=None, pkgs=None):
         raise NotImplementedError()
diff --git a/cloudinit/distros/rhel.py b/cloudinit/distros/rhel.py
index 30c805a6..812e7002 100644
--- a/cloudinit/distros/rhel.py
+++ b/cloudinit/distros/rhel.py
@@ -111,14 +111,6 @@ class Distro(distros.Distro):
             rhel_util.update_sysconfig_file(self.network_conf_fn, net_cfg)
         return dev_names
 
-    def uses_systemd(self):
-        # Fedora 18 and RHEL 7 were the first adopters in their series
-        (dist, vers) = util.system_info()['dist'][:2]
-        major = (int)(vers.split('.')[0])
-        return ((dist.startswith('Red Hat Enterprise Linux') and major >= 7)
-                or (dist.startswith('CentOS Linux') and major >= 7)
-                or (dist.startswith('Fedora') and major >= 18))
-
     def apply_locale(self, locale, out_fn=None):
         if self.uses_systemd():
             if not out_fn:
-- 
cgit v1.2.3


From ad403f27e1f8067d7709ed9b184589af8309ba15 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 15 Jun 2015 15:34:03 -0400
Subject: cc_rh_subscription: fixes for python3

---
 cloudinit/config/cc_rh_subscription.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index cabebca4..6da26d25 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -384,9 +384,9 @@ class SubscriptionManager(object):
                                "because it is not enabled".format(fail))
 
         cmd = ['repos']
-        if enable_list > 0:
+        if len(enable_list) > 0:
             cmd.extend(enable_list)
-        if disable_list > 0:
+        if len(disable_list) > 0:
             cmd.extend(disable_list)
 
         try:
@@ -395,10 +395,10 @@ class SubscriptionManager(object):
             self.log_warn("Unable to alter repos due to {0}".format(e))
             return False
 
-        if enable_list > 0:
+        if len(enable_list) > 0:
             self.log.debug("Enabled the following repos: %s" %
                            (", ".join(enable_list)).replace('--enable=', ''))
-        if disable_list > 0:
+        if len(disable_list) > 0:
             self.log.debug("Disabled the following repos: %s" %
                            (", ".join(disable_list)).replace('--disable=', ''))
         return True
-- 
cgit v1.2.3


From 66c13ab5aca67ca3aa3d1536154989f98b85107a Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 15 Jun 2015 17:20:51 -0400
Subject: apt_configure: fix importing of apt gpg keys under python3

LP: #1463373
---
 ChangeLog                            | 1 +
 cloudinit/config/cc_apt_configure.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index cc12cb6e..6261147e 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -48,6 +48,7 @@
    [Lars Kellogg-Stedman]
  - Add an rh_subscription module to handle registration of Red Hat instances.
    [Brent Baude]
+ - cc_apt_configure: fix importing keys under python3 (LP: #1463373)
 0.7.6:
  - open 0.7.6
  - Enable vendordata on CloudSigma datasource (LP: #1303986)
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 2c51d116..9e9e9e26 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -109,7 +109,7 @@ def handle(name, cfg, cloud, log, _args):
 
 # get gpg keyid from keyserver
 def getkeybyid(keyid, keyserver):
-    with util.ExtendedTemporaryFile(suffix='.sh') as fh:
+    with util.ExtendedTemporaryFile(suffix='.sh', mode="w+", ) as fh:
         fh.write(EXPORT_GPG_KEYID)
         fh.flush()
         cmd = ['/bin/sh', fh.name, keyid, keyserver]
-- 
cgit v1.2.3


From 6e06afffed8614cb143e3a13bab5aa382ccbbce9 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 16 Jun 2015 11:18:33 -0400
Subject: growpart: fix specification of 'devices' list.

given config:
 {'growpart': {'devices': ["/"]}}

the 'devices' was ignored, it was incorrectly read from the top
level non-namespaced location.

LP: #1465436
---
 ChangeLog                       | 1 +
 cloudinit/config/cc_growpart.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index 6261147e..47b8dec2 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -49,6 +49,7 @@
  - Add an rh_subscription module to handle registration of Red Hat instances.
    [Brent Baude]
  - cc_apt_configure: fix importing keys under python3 (LP: #1463373)
+ - cc_growpart: fix specification of 'devices' list (LP: #1465436)
 0.7.6:
  - open 0.7.6
  - Enable vendordata on CloudSigma datasource (LP: #1303986)
diff --git a/cloudinit/config/cc_growpart.py b/cloudinit/config/cc_growpart.py
index f52c41f0..859d69f1 100644
--- a/cloudinit/config/cc_growpart.py
+++ b/cloudinit/config/cc_growpart.py
@@ -276,7 +276,7 @@ def handle(_name, cfg, _cloud, log, _args):
             log.debug("use ignore_growroot_disabled to ignore")
             return
 
-    devices = util.get_cfg_option_list(cfg, "devices", ["/"])
+    devices = util.get_cfg_option_list(mycfg, "devices", ["/"])
     if not len(devices):
         log.debug("growpart: empty device list")
         return
-- 
cgit v1.2.3


From ba7fc871f2e73e0adbf883ef8253180f41cdcfe8 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 16 Jun 2015 17:35:03 +0100
Subject: Use wget to fetch CloudStack passwords.

Different versions of the CloudStack password server respond
differently; wget handles these nicely for us, so it's easier to just
use wget.

LP: #1440263, #1464253
---
 cloudinit/sources/DataSourceCloudStack.py          | 35 +++++++---------------
 tests/unittests/test_datasource/test_cloudstack.py | 30 +++++++++----------
 2 files changed, 25 insertions(+), 40 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 7b32e1fa..d0cac5bb 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -29,8 +29,6 @@ import time
 from socket import inet_ntoa
 from struct import pack
 
-from six.moves import http_client
-
 from cloudinit import ec2_utils as ec2
 from cloudinit import log as logging
 from cloudinit import url_helper as uhelp
@@ -47,35 +45,22 @@ class CloudStackPasswordServerClient(object):
     has documentation about the system.  This implementation is following that
     found at
     https://github.com/shankerbalan/cloudstack-scripts/blob/master/cloud-set-guest-password-debian
-
-    The CloudStack password server is, essentially, a broken HTTP
-    server. It requires us to provide a valid HTTP request (including a
-    DomU_Request header, which is the meat of the request), but just
-    writes the text of its response on to the socket, without a status
-    line or any HTTP headers.  This makes HTTP libraries sad, which
-    explains the screwiness of the implementation of this class.
-
-    This should be fixed in CloudStack by commit
-    a72f14ea9cb832faaac946b3cf9f56856b50142a in December 2014.
     """
 
     def __init__(self, virtual_router_address):
         self.virtual_router_address = virtual_router_address
 
     def _do_request(self, domu_request):
-        # We have to provide a valid HTTP request, but a valid HTTP
-        # response is not returned. This means that getresponse() chokes,
-        # so we use the socket directly to read off the response.
-        # Because we're reading off the socket directly, we can't re-use the
-        # connection.
-        conn = http_client.HTTPConnection(self.virtual_router_address, 8080)
-        try:
-            conn.request('GET', '', headers={'DomU_Request': domu_request})
-            conn.sock.settimeout(30)
-            output = conn.sock.recv(1024).decode('utf-8').strip()
-        finally:
-            conn.close()
-        return output
+        # The password server was in the past, a broken HTTP server, but is now
+        # fixed.  wget handles this seamlessly, so it's easier to shell out to
+        # that rather than write our own handling code.
+        output, _ = util.subp([
+            'wget', '--quiet', '--tries', '3', '--timeout', '20',
+            '--output-document', '-', '--header',
+            'DomU_Request: {0}'.format(domu_request),
+            '{0}:8080'.format(self.virtual_router_address)
+        ])
+        return output.strip()
 
     def get_password(self):
         password = self._do_request('send_my_password')
diff --git a/tests/unittests/test_datasource/test_cloudstack.py b/tests/unittests/test_datasource/test_cloudstack.py
index 959d78ae..656d80d1 100644
--- a/tests/unittests/test_datasource/test_cloudstack.py
+++ b/tests/unittests/test_datasource/test_cloudstack.py
@@ -23,13 +23,11 @@ class TestCloudStackPasswordFetching(TestCase):
         self.patches.enter_context(mock.patch('{0}.uhelp'.format(mod_name)))
 
     def _set_password_server_response(self, response_string):
-        http_client = mock.MagicMock()
-        http_client.HTTPConnection.return_value.sock.recv.return_value = \
-            response_string.encode('utf-8')
+        subp = mock.MagicMock(return_value=(response_string, ''))
         self.patches.enter_context(
-            mock.patch('cloudinit.sources.DataSourceCloudStack.http_client',
-                       http_client))
-        return http_client
+            mock.patch('cloudinit.sources.DataSourceCloudStack.util.subp',
+                       subp))
+        return subp
 
     def test_empty_password_doesnt_create_config(self):
         self._set_password_server_response('')
@@ -55,26 +53,28 @@ class TestCloudStackPasswordFetching(TestCase):
         ds = DataSourceCloudStack({}, None, helpers.Paths({}))
         self.assertTrue(ds.get_data())
 
-    def assertRequestTypesSent(self, http_client, expected_request_types):
-        request_types = [
-            kwargs['headers']['DomU_Request']
-            for _, kwargs
-            in http_client.HTTPConnection.return_value.request.call_args_list]
+    def assertRequestTypesSent(self, subp, expected_request_types):
+        request_types = []
+        for call in subp.call_args_list:
+            args = call[0][0]
+            for arg in args:
+                if arg.startswith('DomU_Request'):
+                    request_types.append(arg.split()[1])
         self.assertEqual(expected_request_types, request_types)
 
     def test_valid_response_means_password_marked_as_saved(self):
         password = 'SekritSquirrel'
-        http_client = self._set_password_server_response(password)
+        subp = self._set_password_server_response(password)
         ds = DataSourceCloudStack({}, None, helpers.Paths({}))
         ds.get_data()
-        self.assertRequestTypesSent(http_client,
+        self.assertRequestTypesSent(subp,
                                     ['send_my_password', 'saved_password'])
 
     def _check_password_not_saved_for(self, response_string):
-        http_client = self._set_password_server_response(response_string)
+        subp = self._set_password_server_response(response_string)
         ds = DataSourceCloudStack({}, None, helpers.Paths({}))
         ds.get_data()
-        self.assertRequestTypesSent(http_client, ['send_my_password'])
+        self.assertRequestTypesSent(subp, ['send_my_password'])
 
     def test_password_not_saved_if_empty(self):
         self._check_password_not_saved_for('')
-- 
cgit v1.2.3


From 7e9e07608f33f57b620b2dca78cf0e1d9da4d53f Mon Sep 17 00:00:00 2001
From: Robert Schweikert <rjschwei@suse.com>
Date: Mon, 29 Jun 2015 10:58:59 -0400
Subject: - Fix logic change introduced by 1000.1.1

---
 cloudinit/config/cc_rightscale_userdata.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rightscale_userdata.py b/cloudinit/config/cc_rightscale_userdata.py
index 24880d13..0ecf3a4d 100644
--- a/cloudinit/config/cc_rightscale_userdata.py
+++ b/cloudinit/config/cc_rightscale_userdata.py
@@ -58,7 +58,7 @@ def handle(name, _cfg, cloud, log, _args):
 
     try:
         mdict = parse_qs(ud)
-        if mdict or MY_HOOKNAME not in mdict:
+        if not mdict or MY_HOOKNAME not in mdict:
             log.debug(("Skipping module %s, "
                        "did not find %s in parsed"
                        " raw userdata"), name, MY_HOOKNAME)
-- 
cgit v1.2.3


From afb5421ee717174b989bfed61333f2073b3f3f50 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Mon, 6 Jul 2015 15:33:33 +0100
Subject: Return a sensible value for DataSourceGCE.availability_zone.

---
 cloudinit/sources/DataSourceGCE.py          | 4 ++++
 tests/unittests/test_datasource/test_gce.py | 5 +++++
 2 files changed, 9 insertions(+)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index f4ed915d..1b28a68c 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -116,6 +116,10 @@ class DataSourceGCE(sources.DataSource):
             lines = self.metadata['public-keys'].splitlines()
             self.metadata['public-keys'] = [self._trim_key(k) for k in lines]
 
+        if self.metadata['availability-zone']:
+            self.metadata['availability-zone'] = self.metadata[
+                'availability-zone'].split('/')[-1]
+
         encoding = self.metadata.get('user-data-encoding')
         if encoding:
             if encoding == 'base64':
diff --git a/tests/unittests/test_datasource/test_gce.py b/tests/unittests/test_datasource/test_gce.py
index 98b68f09..fa714070 100644
--- a/tests/unittests/test_datasource/test_gce.py
+++ b/tests/unittests/test_datasource/test_gce.py
@@ -159,3 +159,8 @@ class TestDataSourceGCE(test_helpers.HttprettyTestCase):
         self.ds.get_data()
 
         self.assertEqual([key_content], self.ds.get_public_ssh_keys())
+
+    def test_only_last_part_of_zone_used_for_availability_zone(self):
+        _set_mock_metadata()
+        self.ds.get_data()
+        self.assertEqual('bar', self.ds.availability_zone)
-- 
cgit v1.2.3


From c33ac7e2deecadeb7f34dacc4e91a3cad2c87ffd Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Thu, 16 Jul 2015 10:12:24 +0100
Subject: CloudSigma: encode/decode data before communicating over the serial
 channel

---
 cloudinit/cs_utils.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/cs_utils.py b/cloudinit/cs_utils.py
index dcf56431..83ac1a0e 100644
--- a/cloudinit/cs_utils.py
+++ b/cloudinit/cs_utils.py
@@ -83,8 +83,8 @@ class CepkoResult(object):
         connection = serial.Serial(port=SERIAL_PORT,
                                    timeout=READ_TIMEOUT,
                                    writeTimeout=WRITE_TIMEOUT)
-        connection.write(self.request)
-        return connection.readline().strip('\x04\n')
+        connection.write(self.request.encode('ascii'))
+        return connection.readline().strip(b'\x04\n').decode('ascii')
 
     def _marshal(self, raw_result):
         try:
-- 
cgit v1.2.3


From 55487d9eb52343bd72271b072734740b52b25c1d Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 21 Jul 2015 13:06:11 +0100
Subject: Refactor cc_mounts.sanitize_devname to make it easier to modify.

---
 cloudinit/config/cc_mounts.py                      | 104 +++++++---------
 .../unittests/test_handler/test_handler_mounts.py  | 133 +++++++++++++++++++++
 2 files changed, 177 insertions(+), 60 deletions(-)
 create mode 100644 tests/unittests/test_handler/test_handler_mounts.py

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 1cb1e839..f970c2ca 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -28,15 +28,15 @@ from cloudinit import type_utils
 from cloudinit import util
 
 # Shortname matches 'sda', 'sda1', 'xvda', 'hda', 'sdb', xvdb, vda, vdd1, sr0
-SHORTNAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
-SHORTNAME = re.compile(SHORTNAME_FILTER)
+DEVICE_NAME_FILTER = r"^([x]{0,1}[shv]d[a-z][0-9]*|sr[0-9]+)$"
+DEVICE_NAME_RE = re.compile(DEVICE_NAME_FILTER)
 WS = re.compile("[%s]+" % (whitespace))
 FSTAB_PATH = "/etc/fstab"
 
 LOG = logging.getLogger(__name__)
 
 
-def is_mdname(name):
+def is_meta_device_name(name):
     # return true if this is a metadata service name
     if name in ["ami", "root", "swap"]:
         return True
@@ -48,6 +48,23 @@ def is_mdname(name):
     return False
 
 
+def _get_nth_partition_for_device(device_path, partition_number):
+    potential_suffixes = [str(partition_number), 'p%s' % (partition_number,)]
+    for suffix in potential_suffixes:
+        potential_partition_device = '%s%s' % (device_path, suffix)
+        if os.path.exists(potential_partition_device):
+            return potential_partition_device
+    return None
+
+
+def _is_block_device(device_path, partition_path=None):
+    device_name = device_path.split('/')[-1]
+    sys_path = os.path.join('/sys/block/', device_name)
+    if partition_path is not None:
+        sys_path = os.path.join(sys_path, partition_path.split('/')[-1])
+    return os.path.exists(sys_path)
+
+
 def sanitize_devname(startname, transformer, log):
     log.debug("Attempting to determine the real name of %s", startname)
 
@@ -58,21 +75,34 @@ def sanitize_devname(startname, transformer, log):
         devname = "ephemeral0"
         log.debug("Adjusted mount option from ephemeral to ephemeral0")
 
-    (blockdev, part) = util.expand_dotted_devname(devname)
+    device_path, partition_number = util.expand_dotted_devname(devname)
 
-    if is_mdname(blockdev):
-        orig = blockdev
-        blockdev = transformer(blockdev)
-        if not blockdev:
+    if is_meta_device_name(device_path):
+        orig = device_path
+        device_path = transformer(device_path)
+        if not device_path:
             return None
-        if not blockdev.startswith("/"):
-            blockdev = "/dev/%s" % blockdev
-        log.debug("Mapped metadata name %s to %s", orig, blockdev)
+        if not device_path.startswith("/"):
+            device_path = "/dev/%s" % (device_path,)
+        log.debug("Mapped metadata name %s to %s", orig, device_path)
+    else:
+        if DEVICE_NAME_RE.match(startname):
+            device_path = "/dev/%s" % (device_path,)
+
+    partition_path = None
+    if partition_number is None:
+        partition_path = _get_nth_partition_for_device(device_path, 1)
     else:
-        if SHORTNAME.match(startname):
-            blockdev = "/dev/%s" % blockdev
+        partition_path = _get_nth_partition_for_device(device_path,
+                                                       partition_number)
+        if partition_path is None:
+            return None
 
-    return devnode_for_dev_part(blockdev, part)
+    if _is_block_device(device_path, partition_path):
+        if partition_path is not None:
+            return partition_path
+        return device_path
+    return None
 
 
 def suggested_swapsize(memsize=None, maxsize=None, fsys=None):
@@ -366,49 +396,3 @@ def handle(_name, cfg, cloud, log, _args):
         util.subp(("mount", "-a"))
     except:
         util.logexc(log, "Activating mounts via 'mount -a' failed")
-
-
-def devnode_for_dev_part(device, partition):
-    """
-    Find the name of the partition. While this might seem rather
-    straight forward, its not since some devices are '<device><partition>'
-    while others are '<device>p<partition>'. For example, /dev/xvda3 on EC2
-    will present as /dev/xvda3p1 for the first partition since /dev/xvda3 is
-    a block device.
-    """
-    if not os.path.exists(device):
-        return None
-
-    short_name = os.path.basename(device)
-    sys_path = "/sys/block/%s" % short_name
-
-    if not os.path.exists(sys_path):
-        LOG.debug("did not find entry for %s in /sys/block", short_name)
-        return None
-
-    sys_long_path = sys_path + "/" + short_name
-
-    if partition is not None:
-        partition = str(partition)
-
-    if partition is None:
-        valid_mappings = [sys_long_path + "1", sys_long_path + "p1"]
-    elif partition != "0":
-        valid_mappings = [sys_long_path + "%s" % partition,
-                          sys_long_path + "p%s" % partition]
-    else:
-        valid_mappings = []
-
-    for cdisk in valid_mappings:
-        if not os.path.exists(cdisk):
-            continue
-
-        dev_path = "/dev/%s" % os.path.basename(cdisk)
-        if os.path.exists(dev_path):
-            return dev_path
-
-    if partition is None or partition == "0":
-        return device
-
-    LOG.debug("Did not fine partition %s for device %s", partition, device)
-    return None
diff --git a/tests/unittests/test_handler/test_handler_mounts.py b/tests/unittests/test_handler/test_handler_mounts.py
new file mode 100644
index 00000000..355674b2
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_mounts.py
@@ -0,0 +1,133 @@
+import os.path
+import shutil
+import tempfile
+
+from cloudinit.config import cc_mounts
+
+from .. import helpers as test_helpers
+
+try:
+    from unittest import mock
+except ImportError:
+    import mock
+
+
+class TestSanitizeDevname(test_helpers.FilesystemMockingTestCase):
+
+    def setUp(self):
+        super(TestSanitizeDevname, self).setUp()
+        self.new_root = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.new_root)
+        self.patchOS(self.new_root)
+
+    def _touch(self, path):
+        path = os.path.join(self.new_root, path.lstrip('/'))
+        basedir = os.path.dirname(path)
+        if not os.path.exists(basedir):
+            os.makedirs(basedir)
+        open(path, 'a').close()
+
+    def _makedirs(self, directory):
+        directory = os.path.join(self.new_root, directory.lstrip('/'))
+        if not os.path.exists(directory):
+            os.makedirs(directory)
+
+    def mock_existence_of_disk(self, disk_path):
+        self._touch(disk_path)
+        self._makedirs(os.path.join('/sys/block', disk_path.split('/')[-1]))
+
+    def mock_existence_of_partition(self, disk_path, partition_number):
+        self.mock_existence_of_disk(disk_path)
+        self._touch(disk_path + str(partition_number))
+        disk_name = disk_path.split('/')[-1]
+        self._makedirs(os.path.join('/sys/block',
+                                    disk_name,
+                                    disk_name + str(partition_number)))
+
+    def test_existent_full_disk_path_is_returned(self):
+        disk_path = '/dev/sda'
+        self.mock_existence_of_disk(disk_path)
+        self.assertEqual(disk_path,
+                         cc_mounts.sanitize_devname(disk_path,
+                                                    lambda x: None,
+                                                    mock.Mock()))
+
+    def test_existent_disk_name_returns_full_path(self):
+        disk_name = 'sda'
+        disk_path = '/dev/' + disk_name
+        self.mock_existence_of_disk(disk_path)
+        self.assertEqual(disk_path,
+                         cc_mounts.sanitize_devname(disk_name,
+                                                    lambda x: None,
+                                                    mock.Mock()))
+
+    def test_existent_meta_disk_is_returned(self):
+        actual_disk_path = '/dev/sda'
+        self.mock_existence_of_disk(actual_disk_path)
+        self.assertEqual(
+            actual_disk_path,
+            cc_mounts.sanitize_devname('ephemeral0',
+                                       lambda x: actual_disk_path,
+                                       mock.Mock()))
+
+    def test_existent_meta_partition_is_returned(self):
+        disk_name, partition_part = '/dev/sda', '1'
+        actual_partition_path = disk_name + partition_part
+        self.mock_existence_of_partition(disk_name, partition_part)
+        self.assertEqual(
+            actual_partition_path,
+            cc_mounts.sanitize_devname('ephemeral0.1',
+                                       lambda x: disk_name,
+                                       mock.Mock()))
+
+    def test_existent_meta_partition_with_p_is_returned(self):
+        disk_name, partition_part = '/dev/sda', 'p1'
+        actual_partition_path = disk_name + partition_part
+        self.mock_existence_of_partition(disk_name, partition_part)
+        self.assertEqual(
+            actual_partition_path,
+            cc_mounts.sanitize_devname('ephemeral0.1',
+                                       lambda x: disk_name,
+                                       mock.Mock()))
+
+    def test_first_partition_returned_if_existent_disk_is_partitioned(self):
+        disk_name, partition_part = '/dev/sda', '1'
+        actual_partition_path = disk_name + partition_part
+        self.mock_existence_of_partition(disk_name, partition_part)
+        self.assertEqual(
+            actual_partition_path,
+            cc_mounts.sanitize_devname('ephemeral0',
+                                       lambda x: disk_name,
+                                       mock.Mock()))
+
+    def test_nth_partition_returned_if_requested(self):
+        disk_name, partition_part = '/dev/sda', '3'
+        actual_partition_path = disk_name + partition_part
+        self.mock_existence_of_partition(disk_name, partition_part)
+        self.assertEqual(
+            actual_partition_path,
+            cc_mounts.sanitize_devname('ephemeral0.3',
+                                       lambda x: disk_name,
+                                       mock.Mock()))
+
+    def test_transformer_returning_none_returns_none(self):
+        self.assertIsNone(
+            cc_mounts.sanitize_devname(
+                'ephemeral0', lambda x: None, mock.Mock()))
+
+    def test_missing_device_returns_none(self):
+        self.assertIsNone(
+            cc_mounts.sanitize_devname('/dev/sda', None, mock.Mock()))
+
+    def test_missing_sys_returns_none(self):
+        disk_path = '/dev/sda'
+        self._makedirs(disk_path)
+        self.assertIsNone(
+            cc_mounts.sanitize_devname(disk_path, None, mock.Mock()))
+
+    def test_existent_disk_but_missing_partition_returns_none(self):
+        disk_path = '/dev/sda'
+        self.mock_existence_of_disk(disk_path)
+        self.assertIsNone(
+            cc_mounts.sanitize_devname(
+                'ephemeral0.1', lambda x: disk_path, mock.Mock()))
-- 
cgit v1.2.3


From edc46ee7192376af65640a81c39335ebdfd196b6 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 21 Jul 2015 13:06:11 +0100
Subject: Extend disk_setup and mounts to handle /dev/disk symlinks.

---
 cloudinit/config/cc_disk_setup.py | 5 +++++
 cloudinit/config/cc_mounts.py     | 8 +++++---
 2 files changed, 10 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index e2ce6db4..92fa7a94 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -648,6 +648,8 @@ def mkpart(device, definition):
                 table_type: Which partition table to use, defaults to MBR
                 device: the device to work on.
     """
+    LOG.debug('Ensuring that we have a real device, not a symbolic link')
+    device = os.path.realpath(device)
 
     LOG.debug("Checking values for %s definition" % device)
     overwrite = definition.get('overwrite', False)
@@ -745,6 +747,9 @@ def mkfs(fs_cfg):
     fs_replace = fs_cfg.get('replace_fs', False)
     overwrite = fs_cfg.get('overwrite', False)
 
+    LOG.debug('Ensuring that we have a real device, not a symbolic link')
+    device = os.path.realpath(device)
+
     # This allows you to define the default ephemeral or swap
     LOG.debug("Checking %s against default devices", device)
 
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index f970c2ca..73b42f91 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -49,7 +49,8 @@ def is_meta_device_name(name):
 
 
 def _get_nth_partition_for_device(device_path, partition_number):
-    potential_suffixes = [str(partition_number), 'p%s' % (partition_number,)]
+    potential_suffixes = [str(partition_number), 'p%s' % (partition_number,),
+                          '-part%s' % (partition_number,)]
     for suffix in potential_suffixes:
         potential_partition_device = '%s%s' % (device_path, suffix)
         if os.path.exists(potential_partition_device):
@@ -58,10 +59,11 @@ def _get_nth_partition_for_device(device_path, partition_number):
 
 
 def _is_block_device(device_path, partition_path=None):
-    device_name = device_path.split('/')[-1]
+    device_name = os.path.realpath(device_path).split('/')[-1]
     sys_path = os.path.join('/sys/block/', device_name)
     if partition_path is not None:
-        sys_path = os.path.join(sys_path, partition_path.split('/')[-1])
+        sys_path = os.path.join(
+            sys_path, os.path.realpath(partition_path).split('/')[-1])
     return os.path.exists(sys_path)
 
 
-- 
cgit v1.2.3


From 9461b1235f7278440ffb84f1e3d95b3f906e444b Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 21 Jul 2015 13:06:11 +0100
Subject: Use /dev/disk devices for Azure ephemeral disk.

The ephemeral disk will not necessarily be assigned the same name at
each boot (LP: #1411582), so we use some udev rules to ensure we always
get the right one.
---
 cloudinit/sources/DataSourceAzure.py          | 39 ++++++++++++++-------------
 tests/unittests/test_datasource/test_azure.py |  6 +++--
 2 files changed, 25 insertions(+), 20 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index d0a882ca..1193d88b 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -254,7 +254,7 @@ class DataSourceAzureNet(sources.DataSource):
 
         self.metadata.update(fabric_data)
 
-        found_ephemeral = find_ephemeral_disk()
+        found_ephemeral = find_fabric_formatted_ephemeral_disk()
         if found_ephemeral:
             self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
             LOG.debug("using detected ephemeral0 of %s", found_ephemeral)
@@ -276,30 +276,33 @@ def count_files(mp):
     return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*'))
 
 
-def find_ephemeral_part():
+def find_fabric_formatted_ephemeral_part():
     """
-    Locate the default ephmeral0.1 device. This will be the first device
-    that has a LABEL of DEF_EPHEMERAL_LABEL and is a NTFS device. If Azure
-    gets more ephemeral devices, this logic will only identify the first
-    such device.
+    Locate the first fabric formatted ephemeral device.
     """
-    c_label_devs = util.find_devs_with("LABEL=%s" % DEF_EPHEMERAL_LABEL)
-    c_fstype_devs = util.find_devs_with("TYPE=ntfs")
-    for dev in c_label_devs:
-        if dev in c_fstype_devs:
-            return dev
+    potential_locations = ['/dev/disk/cloud/azure_resource-part1',
+                           '/dev/disk/azure/resource-part1']
+    device_location = None
+    for potential_location in potential_locations:
+        if os.path.exists(potential_location):
+            device_location = potential_location
+            break
+    if device_location is None:
+        return None
+    ntfs_devices = util.find_devs_with("TYPE=ntfs")
+    real_device = os.path.realpath(device_location)
+    if real_device in ntfs_devices:
+        return device_location
     return None
 
 
-def find_ephemeral_disk():
+def find_fabric_formatted_ephemeral_disk():
     """
     Get the ephemeral disk.
     """
-    part_dev = find_ephemeral_part()
-    if part_dev and str(part_dev[-1]).isdigit():
-        return part_dev[:-1]
-    elif part_dev:
-        return part_dev
+    part_dev = find_fabric_formatted_ephemeral_part()
+    if part_dev:
+        return part_dev.split('-')[0]
     return None
 
 
@@ -313,7 +316,7 @@ def support_new_ephemeral(cfg):
     new ephemeral device is detected, cloud-init overrides the default
     frequency for both disk-setup and mounts for the current boot only.
     """
-    device = find_ephemeral_part()
+    device = find_fabric_formatted_ephemeral_part()
     if not device:
         LOG.debug("no default fabric formated ephemeral0.1 found")
         return None
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 33b971f6..3b7e3293 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -475,10 +475,12 @@ class TestAzureBounce(TestCase):
             mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs',
                               mock.MagicMock(return_value=[])))
         self.patches.enter_context(
-            mock.patch.object(DataSourceAzure, 'find_ephemeral_disk',
+            mock.patch.object(DataSourceAzure,
+                              'find_fabric_formatted_ephemeral_disk',
                               mock.MagicMock(return_value=None)))
         self.patches.enter_context(
-            mock.patch.object(DataSourceAzure, 'find_ephemeral_part',
+            mock.patch.object(DataSourceAzure,
+                              'find_fabric_formatted_ephemeral_part',
                               mock.MagicMock(return_value=None)))
         self.patches.enter_context(
             mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric',
-- 
cgit v1.2.3


From b5230bc3e9d65692093cae9d2f4ca628435a382b Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 21 Jul 2015 12:36:53 -0400
Subject: fix 'make pyflakes'

---
 cloudinit/sources/DataSourceAzure.py          | 2 +-
 tests/unittests/test_datasource/test_azure.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index d0a882ca..2ce85637 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -430,7 +430,7 @@ def write_files(datadir, files, dirmode=None):
                    elem.text != DEF_PASSWD_REDACTION):
                     elem.text = DEF_PASSWD_REDACTION
             return ET.tostring(root)
-        except Exception as e:
+        except Exception:
             LOG.critical("failed to redact userpassword in {}".format(fname))
             return cnt
 
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 33b971f6..d632bcb9 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -174,7 +174,7 @@ class TestAzureDataSource(TestCase):
     def xml_notequals(self, oxml, nxml):
         try:
             self.xml_equals(oxml, nxml)
-        except AssertionError as e:
+        except AssertionError:
             return
         raise AssertionError("XML is the same")
 
-- 
cgit v1.2.3


From 7ac13a1ef376a7b461673b90dfcd2c7c8612227a Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 21 Jul 2015 20:28:44 -0400
Subject: cc_ssh: generate ed25519 host keys when supported; skip unknown key
 types gracefully

LP: #1461242
---
 cloudinit/config/cc_ssh.py | 53 +++++++++++++++++++++++-----------------------
 1 file changed, 27 insertions(+), 26 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index ab6940fa..7a673994 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -20,6 +20,7 @@
 
 import glob
 import os
+import sys
 
 # Ensure this is aliased to a name not 'distros'
 # since the module attribute 'distros'
@@ -33,26 +34,17 @@ DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding,"
 "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" "
 "rather than the user \\\"root\\\".\';echo;sleep 10\"")
 
-KEY_2_FILE = {
-    "rsa_private": ("/etc/ssh/ssh_host_rsa_key", 0o600),
-    "rsa_public": ("/etc/ssh/ssh_host_rsa_key.pub", 0o644),
-    "dsa_private": ("/etc/ssh/ssh_host_dsa_key", 0o600),
-    "dsa_public": ("/etc/ssh/ssh_host_dsa_key.pub", 0o644),
-    "ecdsa_private": ("/etc/ssh/ssh_host_ecdsa_key", 0o600),
-    "ecdsa_public": ("/etc/ssh/ssh_host_ecdsa_key.pub", 0o644),
-}
-
-PRIV_2_PUB = {
-    'rsa_private': 'rsa_public',
-    'dsa_private': 'dsa_public',
-    'ecdsa_private': 'ecdsa_public',
-}
-
-KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
+GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
+KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
 
-GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa']
+KEY_2_FILE = {}
+PRIV_2_PUB = {}
+for k in GENERATE_KEY_NAMES:
+    KEY_2_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
+    KEY_2_FILE.update({"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o644)})
+    PRIV_2_PUB["%s_private" % k] = "%s_public" % k
 
-KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
+KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
 
 
 def handle(_name, cfg, cloud, log, _args):
@@ -92,18 +84,27 @@ def handle(_name, cfg, cloud, log, _args):
         genkeys = util.get_cfg_option_list(cfg,
                                            'ssh_genkeytypes',
                                            GENERATE_KEY_NAMES)
+        lang_c = os.environ.copy()
+        lang_c['LANG'] = 'C'
         for keytype in genkeys:
             keyfile = KEY_FILE_TPL % (keytype)
+            if os.path.exists(keyfile):
+                continue
             util.ensure_dir(os.path.dirname(keyfile))
-            if not os.path.exists(keyfile):
-                cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
+            cmd = ['ssh-keygen', '-t', keytype, '-N', '', '-f', keyfile]
+
+            # TODO(harlowja): Is this guard needed?
+            with util.SeLinuxGuard("/etc/ssh", recursive=True):
                 try:
-                    # TODO(harlowja): Is this guard needed?
-                    with util.SeLinuxGuard("/etc/ssh", recursive=True):
-                        util.subp(cmd, capture=False)
-                except:
-                    util.logexc(log, "Failed generating key type %s to "
-                                "file %s", keytype, keyfile)
+                    out, err = util.subp(cmd, capture=True, rcs=[0, 1], env=lang_c)
+                    sys.stdout.write(util.encode_text(out))
+                except util.ProcessExecutionError as e:
+                    err = util.decode_binary(e.stderr).lower()
+                    if err.startswith("unknown key"):
+                        log.debug("unknown key type %s" % keytype)
+                    else:
+                        util.logexc(log, "Failed generating key type %s to "
+                                    "file %s", keytype, keyfile)
 
     try:
         (users, _groups) = ds.normalize_users_groups(cfg, cloud.distro)
-- 
cgit v1.2.3


From 73c5bbfa31b922a0ba403216c0fc1f63b22a9262 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 22 Jul 2015 13:06:34 +0100
Subject: Make full data source available to code that handles mirror
 selection.

---
 cloudinit/distros/__init__.py                | 15 +++++++--------
 cloudinit/sources/__init__.py                |  3 +--
 tests/unittests/test_distros/test_generic.py | 22 +++++++++++++++-------
 3 files changed, 23 insertions(+), 17 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 8a947867..47b76c68 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -117,12 +117,11 @@ class Distro(object):
             arch = self.get_primary_arch()
         return _get_arch_package_mirror_info(mirror_info, arch)
 
-    def get_package_mirror_info(self, arch=None,
-                                availability_zone=None):
+    def get_package_mirror_info(self, arch=None, data_source=None):
         # This resolves the package_mirrors config option
         # down to a single dict of {mirror_name: mirror_url}
         arch_info = self._get_arch_package_mirror_info(arch)
-        return _get_package_mirror_info(availability_zone=availability_zone,
+        return _get_package_mirror_info(data_source=data_source,
                                         mirror_info=arch_info)
 
     def apply_network(self, settings, bring_up=True):
@@ -556,7 +555,7 @@ class Distro(object):
                 LOG.info("Added user '%s' to group '%s'" % (member, name))
 
 
-def _get_package_mirror_info(mirror_info, availability_zone=None,
+def _get_package_mirror_info(mirror_info, data_source=None,
                              mirror_filter=util.search_for_mirror):
     # given a arch specific 'mirror_info' entry (from package_mirrors)
     # search through the 'search' entries, and fallback appropriately
@@ -572,11 +571,11 @@ def _get_package_mirror_info(mirror_info, availability_zone=None,
     ec2_az_re = ("^[a-z][a-z]-(%s)-[1-9][0-9]*[a-z]$" % directions_re)
 
     subst = {}
-    if availability_zone:
-        subst['availability_zone'] = availability_zone
+    if data_source and data_source.availability_zone:
+        subst['availability_zone'] = data_source.availability_zone
 
-    if availability_zone and re.match(ec2_az_re, availability_zone):
-        subst['ec2_region'] = "%s" % availability_zone[0:-1]
+        if re.match(ec2_az_re, data_source.availability_zone):
+            subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1]
 
     results = {}
     for (name, mirror) in mirror_info.get('failsafe', {}).items():
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 39eab51b..1a036638 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -210,8 +210,7 @@ class DataSource(object):
             return hostname
 
     def get_package_mirror_info(self):
-        return self.distro.get_package_mirror_info(
-            availability_zone=self.availability_zone)
+        return self.distro.get_package_mirror_info(data_source=self)
 
 
 def normalize_pubkey_data(pubkey_data):
diff --git a/tests/unittests/test_distros/test_generic.py b/tests/unittests/test_distros/test_generic.py
index 8e3bd78a..6ed1704c 100644
--- a/tests/unittests/test_distros/test_generic.py
+++ b/tests/unittests/test_distros/test_generic.py
@@ -7,6 +7,11 @@ import os
 import shutil
 import tempfile
 
+try:
+    from unittest import mock
+except ImportError:
+    import mock
+
 unknown_arch_info = {
     'arches': ['default'],
     'failsafe': {'primary': 'http://fs-primary-default',
@@ -144,33 +149,35 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
 
     def test_get_package_mirror_info_az_ec2(self):
         arch_mirrors = gapmi(package_mirrors, arch="amd64")
+        data_source_mock = mock.Mock(availability_zone="us-east-1a")
 
-        results = gpmi(arch_mirrors, availability_zone="us-east-1a",
+        results = gpmi(arch_mirrors, data_source=data_source_mock,
                        mirror_filter=self.return_first)
         self.assertEqual(results,
                          {'primary': 'http://us-east-1.ec2/',
                           'security': 'http://security-mirror1-intel'})
 
-        results = gpmi(arch_mirrors, availability_zone="us-east-1a",
+        results = gpmi(arch_mirrors, data_source=data_source_mock,
                        mirror_filter=self.return_second)
         self.assertEqual(results,
                          {'primary': 'http://us-east-1a.clouds/',
                           'security': 'http://security-mirror2-intel'})
 
-        results = gpmi(arch_mirrors, availability_zone="us-east-1a",
+        results = gpmi(arch_mirrors, data_source=data_source_mock,
                        mirror_filter=self.return_none)
         self.assertEqual(results, package_mirrors[0]['failsafe'])
 
     def test_get_package_mirror_info_az_non_ec2(self):
         arch_mirrors = gapmi(package_mirrors, arch="amd64")
+        data_source_mock = mock.Mock(availability_zone="nova.cloudvendor")
 
-        results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor",
+        results = gpmi(arch_mirrors, data_source=data_source_mock,
                        mirror_filter=self.return_first)
         self.assertEqual(results,
                          {'primary': 'http://nova.cloudvendor.clouds/',
                           'security': 'http://security-mirror1-intel'})
 
-        results = gpmi(arch_mirrors, availability_zone="nova.cloudvendor",
+        results = gpmi(arch_mirrors, data_source=data_source_mock,
                        mirror_filter=self.return_last)
         self.assertEqual(results,
                          {'primary': 'http://nova.cloudvendor.clouds/',
@@ -178,17 +185,18 @@ class TestGenericDistro(helpers.FilesystemMockingTestCase):
 
     def test_get_package_mirror_info_none(self):
         arch_mirrors = gapmi(package_mirrors, arch="amd64")
+        data_source_mock = mock.Mock(availability_zone=None)
 
         # because both search entries here replacement based on
         # availability-zone, the filter will be called with an empty list and
         # failsafe should be taken.
-        results = gpmi(arch_mirrors, availability_zone=None,
+        results = gpmi(arch_mirrors, data_source=data_source_mock,
                        mirror_filter=self.return_first)
         self.assertEqual(results,
                          {'primary': 'http://fs-primary-intel',
                           'security': 'http://security-mirror1-intel'})
 
-        results = gpmi(arch_mirrors, availability_zone=None,
+        results = gpmi(arch_mirrors, data_source=data_source_mock,
                        mirror_filter=self.return_last)
         self.assertEqual(results,
                          {'primary': 'http://fs-primary-intel',
-- 
cgit v1.2.3


From bc7d57a0ae827978c87919c833bb5e8d2d5143c6 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Wed, 22 Jul 2015 13:06:34 +0100
Subject: Add DataSource.region and use it in mirror selection.

Also implement DataSource.region for EC2 and GCE data sources.
---
 cloudinit/distros/__init__.py      | 3 +++
 cloudinit/sources/DataSourceEc2.py | 7 +++++++
 cloudinit/sources/DataSourceGCE.py | 4 ++++
 cloudinit/sources/__init__.py      | 4 ++++
 config/cloud.cfg                   | 1 +
 5 files changed, 19 insertions(+)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 47b76c68..71884b32 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -577,6 +577,9 @@ def _get_package_mirror_info(mirror_info, data_source=None,
         if re.match(ec2_az_re, data_source.availability_zone):
             subst['ec2_region'] = "%s" % data_source.availability_zone[0:-1]
 
+    if data_source and data_source.region:
+        subst['region'] = data_source.region
+
     results = {}
     for (name, mirror) in mirror_info.get('failsafe', {}).items():
         results[name] = mirror
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 798869b7..0032d06c 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -197,6 +197,13 @@ class DataSourceEc2(sources.DataSource):
         except KeyError:
             return None
 
+    @property
+    def region(self):
+        az = self.availability_zone
+        if az is not None:
+            return az[:-1]
+        return None
+
 # Used to match classes to dependencies
 datasources = [
   (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 1b28a68c..7e7fc033 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -152,6 +152,10 @@ class DataSourceGCE(sources.DataSource):
     def availability_zone(self):
         return self.metadata['availability-zone']
 
+    @property
+    def region(self):
+        return self.availability_zone.rsplit('-', 1)[0]
+
 # Used to match classes to dependencies
 datasources = [
     (DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 1a036638..a21c08c2 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -157,6 +157,10 @@ class DataSource(object):
         return self.metadata.get('availability-zone',
                                  self.metadata.get('availability_zone'))
 
+    @property
+    def region(self):
+        return self.metadata.get('region')
+
     def get_instance_id(self):
         if not self.metadata or 'instance-id' not in self.metadata:
             # Return a magic not really instance id string
diff --git a/config/cloud.cfg b/config/cloud.cfg
index e96e1781..2b27f379 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -104,6 +104,7 @@ system_info:
          primary:
            - http://%(ec2_region)s.ec2.archive.ubuntu.com/ubuntu/
            - http://%(availability_zone)s.clouds.archive.ubuntu.com/ubuntu/
+           - http://%(region)s.clouds.archive.ubuntu.com/ubuntu/
          security: []
      - arches: [armhf, armel, default]
        failsafe:
-- 
cgit v1.2.3


From e86decfd53418ff481cb5db8d8b089417f1dafdf Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 22 Jul 2015 13:23:19 -0400
Subject: pep8 line too long

---
 cloudinit/config/cc_ssh.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 7a673994..cfaceac6 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -96,7 +96,8 @@ def handle(_name, cfg, cloud, log, _args):
             # TODO(harlowja): Is this guard needed?
             with util.SeLinuxGuard("/etc/ssh", recursive=True):
                 try:
-                    out, err = util.subp(cmd, capture=True, rcs=[0, 1], env=lang_c)
+                    out, err = util.subp(cmd, capture=True, rcs=[0, 1],
+                                         env=lang_c)
                     sys.stdout.write(util.encode_text(out))
                 except util.ProcessExecutionError as e:
                     err = util.decode_binary(e.stderr).lower()
-- 
cgit v1.2.3


From a21baa2bf5619358250821aa3c3d69dd54b81b18 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 22 Jul 2015 13:25:05 -0400
Subject: replace '2' with 'TO' in globals

---
 cloudinit/config/cc_ssh.py | 20 ++++++++++----------
 1 file changed, 10 insertions(+), 10 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index cfaceac6..cd0174da 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -37,12 +37,12 @@ DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding,"
 GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
 KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
 
-KEY_2_FILE = {}
-PRIV_2_PUB = {}
+CONFIG_KEY_TO_FILE = {}
+PRIV_TO_PUB = {}
 for k in GENERATE_KEY_NAMES:
-    KEY_2_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
-    KEY_2_FILE.update({"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)})
-    PRIV_2_PUB["%s_private" % k] = "%s_public" % k
+    CONFIG_KEY_TO_FILE.update({"%s_private" % k: (KEY_FILE_TPL % k, 0o600)})
+    CONFIG_KEY_TO_FILE.update({"%s_public" % k: (KEY_FILE_TPL % k + ".pub", 0o600)})
+    PRIV_TO_PUB["%s_private" % k] = "%s_public" % k
 
 KEY_GEN_TPL = 'o=$(ssh-keygen -yf "%s") && echo "$o" root@localhost > "%s"'
 
@@ -61,15 +61,15 @@ def handle(_name, cfg, cloud, log, _args):
     if "ssh_keys" in cfg:
         # if there are keys in cloud-config, use them
         for (key, val) in cfg["ssh_keys"].items():
-            if key in KEY_2_FILE:
-                tgt_fn = KEY_2_FILE[key][0]
-                tgt_perms = KEY_2_FILE[key][1]
+            if key in CONFIG_KEY_TO_FILE:
+                tgt_fn = CONFIG_KEY_TO_FILE[key][0]
+                tgt_perms = CONFIG_KEY_TO_FILE[key][1]
                 util.write_file(tgt_fn, val, tgt_perms)
 
-        for (priv, pub) in PRIV_2_PUB.items():
+        for (priv, pub) in PRIV_TO_PUB.items():
             if pub in cfg['ssh_keys'] or priv not in cfg['ssh_keys']:
                 continue
-            pair = (KEY_2_FILE[priv][0], KEY_2_FILE[pub][0])
+            pair = (CONFIG_KEY_TO_FILE[priv][0], CONFIG_KEY_TO_FILE[pub][0])
             cmd = ['sh', '-xc', KEY_GEN_TPL % pair]
             try:
                 # TODO(harlowja): Is this guard needed?
-- 
cgit v1.2.3


From 404baf87e58f2c9740c8b31137b727c77d182058 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 22 Jul 2015 14:10:58 -0400
Subject: fixes from testing

---
 cloudinit/config/cc_ssh.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index cd0174da..7fb13333 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -96,12 +96,11 @@ def handle(_name, cfg, cloud, log, _args):
             # TODO(harlowja): Is this guard needed?
             with util.SeLinuxGuard("/etc/ssh", recursive=True):
                 try:
-                    out, err = util.subp(cmd, capture=True, rcs=[0, 1],
-                                         env=lang_c)
-                    sys.stdout.write(util.encode_text(out))
+                    out, err = util.subp(cmd, capture=True, env=lang_c)
+                    sys.stdout.write(util.decode_binary(out))
                 except util.ProcessExecutionError as e:
                     err = util.decode_binary(e.stderr).lower()
-                    if err.lower().startswith("unknown key"):
+                    if e.exit_code == 1 and err.lower().startswith("unknown key"):
                         log.debug("unknown key type %s" % keytype)
                     else:
                         util.logexc(log, "Failed generating key type %s to "
-- 
cgit v1.2.3


From 4c799192a9d3132da0138e1adb640a9ab7e191b0 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 22 Jul 2015 14:15:57 -0400
Subject: improve log message

---
 cloudinit/config/cc_ssh.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 7fb13333..c2a7af72 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -101,7 +101,7 @@ def handle(_name, cfg, cloud, log, _args):
                 except util.ProcessExecutionError as e:
                     err = util.decode_binary(e.stderr).lower()
                     if e.exit_code == 1 and err.lower().startswith("unknown key"):
-                        log.debug("unknown key type %s" % keytype)
+                        log.debug("ssh-keygen: unknown key type '%s'", keytype)
                     else:
                         util.logexc(log, "Failed generating key type %s to "
                                     "file %s", keytype, keyfile)
-- 
cgit v1.2.3


From 452ea086beb8b28b41f5ccc610f4e5433010e35b Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 22 Jul 2015 15:14:33 -0400
Subject: remove some overly verbose log messages

---
 cloudinit/config/cc_disk_setup.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index 92fa7a94..d5b0d1d7 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -648,7 +648,7 @@ def mkpart(device, definition):
                 table_type: Which partition table to use, defaults to MBR
                 device: the device to work on.
     """
-    LOG.debug('Ensuring that we have a real device, not a symbolic link')
+    # ensure that we get a real device rather than a symbolic link
     device = os.path.realpath(device)
 
     LOG.debug("Checking values for %s definition" % device)
@@ -747,7 +747,7 @@ def mkfs(fs_cfg):
     fs_replace = fs_cfg.get('replace_fs', False)
     overwrite = fs_cfg.get('overwrite', False)
 
-    LOG.debug('Ensuring that we have a real device, not a symbolic link')
+    # ensure that we get a real device rather than a symbolic link
     device = os.path.realpath(device)
 
     # This allows you to define the default ephemeral or swap
-- 
cgit v1.2.3


From 6970029c661ab858a55dd467e5c593694ab39512 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 24 Jul 2015 16:58:57 -0400
Subject: commit initial re-work/re-implementation of syslog config

---
 cloudinit/config/cc_syslog.py                      | 183 +++++++++++++++++++++
 doc/examples/cloud-config-syslog.txt               |  30 ++++
 .../unittests/test_handler/test_handler_syslog.py  |  32 ++++
 3 files changed, 245 insertions(+)
 create mode 100644 cloudinit/config/cc_syslog.py
 create mode 100644 doc/examples/cloud-config-syslog.txt
 create mode 100644 tests/unittests/test_handler/test_handler_syslog.py

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_syslog.py b/cloudinit/config/cc_syslog.py
new file mode 100644
index 00000000..21a8e8a9
--- /dev/null
+++ b/cloudinit/config/cc_syslog.py
@@ -0,0 +1,183 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2015 Canonical Ltd.
+#
+#    Author: Scott Moser <scott.moser@canonical.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
+
+import re
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+
+BUILTIN_CFG = {
+    'remotes_file': '/etc/rsyslog.d/20-cloudinit-remotes.conf',
+    'remotes': {},
+    'service_name': 'rsyslog',
+}
+
+COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
+HOST_PORT_RE = re.compile(
+    r'^(?P<proto>[@]{0,2})'
+    '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
+    '([:](?P<port>[0-9]+))?$')
+
+
+def parse_remotes_line(line, name=None):
+    try:
+        data, comment = COMMENT_RE.split(line)
+        comment = comment.strip()
+    except ValueError:
+        data, comment = (line, None)
+
+    toks = data.strip().split()
+    match = None
+    if len(toks) == 1:
+        host_port = data
+    elif len(toks) == 2:
+        match, host_port = toks
+    else:
+        raise ValueError("line had multiple spaces: %s" % data)
+
+    toks = HOST_PORT_RE.match(host_port)
+
+    if not toks:
+        raise ValueError("Invalid host specification '%s'" % host_port)
+
+    proto = toks.group('proto')
+    addr = toks.group('addr') or toks.group('bracket_addr')
+    port = toks.group('port')
+    print("host_port: %s" % addr)
+    print("port: %s" % port)
+
+    if addr.startswith("[") and not addr.endswith("]"):
+        raise ValueError("host spec had invalid brackets: %s" % addr)
+
+    if comment and not name:
+        name = comment
+
+    t = SyslogRemotesLine(name=name, match=match, proto=proto,
+                          addr=addr, port=port)
+    t.validate()
+    return t
+
+
+class SyslogRemotesLine(object):
+    def __init__(self, name=None, match=None, proto=None, addr=None,
+                 port=None):
+        if not match:
+            match = "*.*"
+        self.name = name
+        self.match = match
+        if proto == "@":
+            proto = "udp"
+        elif proto == "@@":
+            proto = "tcp"
+        self.proto = proto
+
+        self.addr = addr
+        if port:
+            self.port = int(port)
+        else:
+            self.port = None
+
+    def validate(self):
+        if self.port:
+            try:
+                int(self.port)
+            except ValueError:
+                raise ValueError("port '%s' is not an integer" % self.port)
+
+        if not self.addr:
+            raise ValueError("address is required")
+
+    def __repr__(self):
+        return "[name=%s match=%s proto=%s address=%s port=%s]" % (
+            self.name, self.match, self.proto, self.addr, self.port
+        )
+
+    def __str__(self):
+        buf = self.match + " "
+        if self.proto == "udp":
+            buf += " @"
+        elif self.proto == "tcp":
+            buf += " @@"
+
+        if ":" in self.addr:
+            buf += "[" + self.addr + "]"
+        else:
+            buf += self.addr
+
+        if self.port:
+            buf += ":%s" % self.port
+
+        if self.name:
+            buf += " # %s" % self.name
+        return buf
+
+
+def remotes_to_rsyslog_cfg(remotes, header=None):
+    if not remotes:
+        return None
+    lines = []
+    if header is not None:
+        lines.append(header)
+    for name, line in remotes.items():
+        try:
+            lines.append(parse_remotes_line(line, name=name))
+        except ValueError as e:
+            LOG.warn("failed loading remote %s: %s [%s]", name, line, e)
+    return '\n'.join(str(lines)) + '\n'
+
+
+def reload_syslog(systemd, service='rsyslog'):
+    if systemd:
+        cmd = ['systemctl', 'reload-or-try-restart', service]
+    else:
+        cmd = ['service', service, 'reload']
+    try:
+        util.subp(cmd, capture=True)
+    except util.ProcessExecutionError as e:
+        LOG.warn("Failed to reload syslog using '%s': %s", ' '.join(cmd), e)
+
+
+def handle(name, cfg, cloud, log, args):
+    cfgin = cfg.get('syslog')
+    if not cfgin:
+        cfgin = {}
+    mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
+
+    remotes_file = mycfg.get('remotes_file')
+    if util.is_false(remotes_file):
+        LOG.debug("syslog/remotes_file empty, doing nothing")
+        return
+
+    remotes = mycfg.get('remotes_dict', {})
+    if remotes and not isinstance(remotes, dict):
+        LOG.warn("syslog/remotes: content is not a dictionary")
+        return
+
+    config_data = remotes_to_rsyslog_cfg(
+        remotes, header="#cloud-init syslog module")
+
+    util.write_file(remotes_file, config_data)
+
+    reload_syslog(
+        systemd=cloud.distro.uses_systemd(),
+        service=mycfg.get('service_name'))
diff --git a/doc/examples/cloud-config-syslog.txt b/doc/examples/cloud-config-syslog.txt
new file mode 100644
index 00000000..9ec5e120
--- /dev/null
+++ b/doc/examples/cloud-config-syslog.txt
@@ -0,0 +1,30 @@
+## syslog module allows you to configure the system's syslog.
+## configuration of syslog is under the top level cloud-config
+## entry 'syslog'.
+##
+## "remotes"
+## remotes is a dictionary. Its items are of the form 'name: remote_info'.
+## name is simply a label (example 'maas'). It has no importance other than
+## for cloud-init merging configs.
+##
+## remote_info is of the format
+## * optional filter for log messages
+## default if not present: *.*
+## * optional leading '@' or '@@' (indicates udp or tcp).
+## default if not present (udp): @
+## This is the rsyslog format for that. If not present, '@' (udp) is used.
+## * ipv4 or ipv6 or hostname
+## ipv6 addresses must be encoded in [::1] format. example: @[fd00::1]:514
+## * optional port
+## port defaults to 514
+##
+## Example:
+#cloud-config
+syslog:
+ remotes:
+  # udp to host 'maas.mydomain' port 514
+  maashost: maas.mydomain
+  # udp to ipv4 host on port 514
+  maas: "@[10.5.1.56]:514"
+  # tcp to ipv6 host on port 555
+  maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555"
diff --git a/tests/unittests/test_handler/test_handler_syslog.py b/tests/unittests/test_handler/test_handler_syslog.py
new file mode 100644
index 00000000..bbfd521e
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_syslog.py
@@ -0,0 +1,32 @@
+from cloudinit.config.cc_syslog import (
+    parse_remotes_line, SyslogRemotesLine, remotes_to_rsyslog_cfg)
+from cloudinit import util
+from .. import helpers as t_help
+
+
+class TestParseRemotesLine(t_help.TestCase):
+    def test_valid_port(self):
+        r = parse_remotes_line("foo:9")
+        self.assertEqual(9, r.port)
+
+    def test_invalid_port(self):
+        with self.assertRaises(ValueError):
+            parse_remotes_line("*.* foo:abc")
+
+    def test_valid_ipv6(self):
+        r = parse_remotes_line("*.* [::1]")
+        self.assertEqual("*.* [::1]", str(r))
+
+    def test_valid_ipv6_with_port(self):
+        r = parse_remotes_line("*.* [::1]:100")
+        self.assertEqual(r.port, 100)
+        self.assertEqual(r.addr, "::1")
+        self.assertEqual("*.* [::1]:100", str(r))
+
+    def test_invalid_multiple_colon(self):
+        with self.assertRaises(ValueError):
+            parse_remotes_line("*.* ::1:100")
+
+    def test_name_in_string(self):
+        r = parse_remotes_line("syslog.host", name="foobar")
+        self.assertEqual("*.* syslog.host # foobar", str(r))
-- 
cgit v1.2.3


From 247f2cecb72a852a42f147645e80b538eee05f93 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 27 Jul 2015 14:20:29 -0400
Subject: update existing rsyslog module with better code and doc

---
 cloudinit/config/cc_rsyslog.py | 190 +++++++++++++++++++++++++++++++++--------
 1 file changed, 153 insertions(+), 37 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 57486edc..7d5657bc 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -17,37 +17,129 @@
 #
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+rsyslog module allows configuration of syslog logging via rsyslog
+Configuration is done under the cloud-config top level 'rsyslog'.
+
+Under 'rsyslog' you can define:
+  - configs:  [default=[]]
+    this is a list.  entries in it are a string or a dictionary.
+    each entry has 2 parts:
+       * content
+       * filename
+    if the entry is a string, then it is assigned to 'content'.
+    for each entry, content is written to the provided filename.
+    if filename is not provided, its default is read from 'config_filename'
+
+    Content here can be any valid rsyslog configuration.  No specific
+    format is enforced.
+
+    For simply logging to an existing remote syslog server, via udp:
+      configs: ["*.* @192.168.1.1"]
+
+  - config_filename: [default=20-cloud-config.conf]
+    this is the file name to use if none is provided in a config entry.
+  - config_dir: [default=/etc/rsyslog.d]
+    this directory is used for filenames that are not absolute paths.
+  - service_reload_command: [default="auto"]
+    this command is executed if files have been written and thus the syslog
+    daemon needs to be told.
+
+Note, since cloud-init 0.5 a legacy version of rsyslog config has been
+present and is still supported. See below for the mappings between old
+value and new value:
+   old value           -> new value
+   'rsyslog'           -> rsyslog/configs
+   'rsyslog_filename'  -> rsyslog/config_filename
+   'rsyslog_dir'       -> rsyslog/config_dir
+
+the legacy config does not support 'service_reload_command'.
+
+Example config:
+  #cloud-config
+  rsyslog:
+    configs:
+      - "*.* @@192.158.1.1"
+      - content: "*.*   @@192.0.2.1:10514"
+      - filename: 01-examplecom.conf
+      - content: |
+        *.*   @@syslogd.example.com
+    config_dir: config_dir
+    config_filename: config_filename
+    service_reload_command: [your, syslog, restart, command]
+
+Example Legacy config:
+  #cloud-config
+  rsyslog:
+    - "*.* @@192.158.1.1"
+  rsyslog_dir: /etc/rsyslog-config.d/
+  rsyslog_filename: 99-local.conf
+"""
 
 import os
+import six
 
+from cloudinit import log as logging
 from cloudinit import util
 
 DEF_FILENAME = "20-cloud-config.conf"
 DEF_DIR = "/etc/rsyslog.d"
+DEF_RELOAD = "auto"
 
+KEYNAME_CONFIGS = 'configs'
+KEYNAME_FILENAME = 'config_filename'
+KEYNAME_DIR = 'config_dir'
+KEYNAME_RELOAD = 'service_reload_command'
+KEYNAME_LEGACY_FILENAME = 'rsyslog_filename'
+KEYNAME_LEGACY_DIR = 'rsyslog_dir'
 
-def handle(name, cfg, cloud, log, _args):
-    # rsyslog:
-    #  - "*.* @@192.158.1.1"
-    #  - content: "*.*   @@192.0.2.1:10514"
-    #  - filename: 01-examplecom.conf
-    #    content: |
-    #      *.*   @@syslogd.example.com
+LOG = logging.getLogger(__name__)
 
-    # process 'rsyslog'
-    if 'rsyslog' not in cfg:
-        log.debug(("Skipping module named %s,"
-                   " no 'rsyslog' key in configuration"), name)
-        return
 
-    def_dir = cfg.get('rsyslog_dir', DEF_DIR)
-    def_fname = cfg.get('rsyslog_filename', DEF_FILENAME)
+def reload_syslog(command=DEF_RELOAD, systemd=False):
+    service = 'rsyslog'
+    if command == DEF_RELOAD:
+        if systemd:
+            cmd = ['systemctl', 'reload-or-try-restart', service]
+        else:
+            cmd = ['service', service, 'reload']
+    else:
+        cmd = command
+    util.subp(cmd, capture=True)
+
+
+def load_config(cfg):
+    # return an updated config with entries of the correct type
+    # support converting the old top level format into new format
+    mycfg = cfg.get('rsyslog', {})
+
+    if isinstance(mycfg, list):
+        mycfg[KEYNAME_CONFIGS] = mycfg
+        if KEYNAME_LEGACY_FILENAME in cfg:
+            mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME]
+        if KEYNAME_LEGACY_DIR in cfg:
+            mycfg[KEYNAME_DIR] = cfg[KEYNAME_DIR]
+
+    fillup = (
+        (KEYNAME_DIR, DEF_DIR, six.text_type),
+        (KEYNAME_FILENAME, DEF_FILENAME, six.text_type)
+        (KEYNAME_RELOAD, DEF_RELOAD, (six.text_type, list)))
 
+    for key, default, vtypes in fillup:
+        if key not in mycfg or not isinstance(mycfg[key], vtypes):
+            mycfg[key] = default
+
+    return mycfg
+
+
+def apply_rsyslog_changes(configs, def_fname, cfg_dir):
+    # apply the changes in 'configs' to the paths in def_fname and cfg_dir
+    # return a list of the files changed
     files = []
-    for i, ent in enumerate(cfg['rsyslog']):
+    for cur_pos, ent in enumerate(configs):
         if isinstance(ent, dict):
             if "content" not in ent:
-                log.warn("No 'content' entry in config entry %s", i + 1)
+                LOG.warn("No 'content' entry in config entry %s", cur_pos + 1)
                 continue
             content = ent['content']
             filename = ent.get("filename", def_fname)
@@ -57,15 +149,14 @@ def handle(name, cfg, cloud, log, _args):
 
         filename = filename.strip()
         if not filename:
-            log.warn("Entry %s has an empty filename", i + 1)
+            LOG.warn("Entry %s has an empty filename", cur_pos + 1)
             continue
 
         if not filename.startswith("/"):
-            filename = os.path.join(def_dir, filename)
+            filename = os.path.join(cfg_dir, filename)
 
         # Truncate filename first time you see it
-        omode = "ab"
-        if filename not in files:
+        omode = "ab" if filename not in files:
             omode = "wb"
             files.append(filename)
 
@@ -73,24 +164,49 @@ def handle(name, cfg, cloud, log, _args):
             contents = "%s\n" % (content)
             util.write_file(filename, contents, omode=omode)
         except Exception:
-            util.logexc(log, "Failed to write to %s", filename)
+            util.logexc(LOG, "Failed to write to %s", filename)
+
+    return files
+
+
+def handle(name, cfg, cloud, log, _args):
+    # rsyslog:
+    #  configs:
+    #   - "*.* @@192.158.1.1"
+    #   - content: "*.*   @@192.0.2.1:10514"
+    #   - filename: 01-examplecom.conf
+    #     content: |
+    #       *.*   @@syslogd.example.com
+    #  config_dir: DEF_DIR
+    #  config_filename: DEF_FILENAME
+    #  service_reload: "auto"
+
+    if 'rsyslog' not in cfg:
+        log.debug(("Skipping module named %s,"
+                   " no 'rsyslog' key in configuration"), name)
+        return
+
+    mycfg = load_config(cfg)
+    if not mycfg['configs']:
+        log.debug("Empty config rsyslog['configs'], nothing to do")
+        return
+
+    changes = apply_rsyslog_changes(
+        configs=mycfg[KEYNAME_CONFIGS],
+        def_fname=mycfg[KEYNAME_FILENAME],
+        cfg_dir=mycfg[KEYNAME_DIR])
+
+    if not changes:
+        log.debug("restart of syslog not necessary, no changes made")
+        return
 
-    # Attempt to restart syslogd
-    restarted = False
     try:
-        # If this config module is running at cloud-init time
-        # (before rsyslog is running) we don't actually have to
-        # restart syslog.
-        #
-        # Upstart actually does what we want here, in that it doesn't
-        # start a service that wasn't running already on 'restart'
-        # it will also return failure on the attempt, so 'restarted'
-        # won't get set.
-        log.debug("Restarting rsyslog")
-        util.subp(['service', 'rsyslog', 'restart'])
-        restarted = True
-    except Exception:
-        util.logexc(log, "Failed restarting rsyslog")
+        restarted = reload_syslog(
+            service=mycfg[KEYNAME_RELOAD],
+            systemd=cloud.distro.uses_systemd()),
+    except util.ProcessExecutionError as e:
+        restarted = False
+        log.warn("Failed to reload syslog", e)
 
     if restarted:
         # This only needs to run if we *actually* restarted
@@ -98,4 +214,4 @@ def handle(name, cfg, cloud, log, _args):
         cloud.cycle_logging()
         # This should now use rsyslog if
         # the logging was setup to use it...
-        log.debug("%s configured %s files", name, files)
+        log.debug("%s configured %s files", name, changes)
-- 
cgit v1.2.3


From 22cb92421234c31b783ed9df01439c734535ba01 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 27 Jul 2015 16:49:30 -0400
Subject: add rsyslog tests

reasonable test of reworked rsyslog module
---
 cloudinit/config/cc_rsyslog.py                     |  27 ++---
 .../unittests/test_handler/test_handler_rsyslog.py | 113 +++++++++++++++++++++
 2 files changed, 122 insertions(+), 18 deletions(-)
 create mode 100644 tests/unittests/test_handler/test_handler_rsyslog.py

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 7d5657bc..a07200c3 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -113,17 +113,18 @@ def load_config(cfg):
     # support converting the old top level format into new format
     mycfg = cfg.get('rsyslog', {})
 
-    if isinstance(mycfg, list):
-        mycfg[KEYNAME_CONFIGS] = mycfg
+    if isinstance(cfg.get('rsyslog'), list):
+        mycfg = {KEYNAME_CONFIGS: cfg.get('rsyslog')}
         if KEYNAME_LEGACY_FILENAME in cfg:
             mycfg[KEYNAME_FILENAME] = cfg[KEYNAME_LEGACY_FILENAME]
         if KEYNAME_LEGACY_DIR in cfg:
-            mycfg[KEYNAME_DIR] = cfg[KEYNAME_DIR]
+            mycfg[KEYNAME_DIR] = cfg[KEYNAME_LEGACY_DIR]
 
     fillup = (
-        (KEYNAME_DIR, DEF_DIR, six.text_type),
-        (KEYNAME_FILENAME, DEF_FILENAME, six.text_type)
-        (KEYNAME_RELOAD, DEF_RELOAD, (six.text_type, list)))
+        (KEYNAME_CONFIGS, [], list),
+        (KEYNAME_DIR, DEF_DIR, six.string_types),
+        (KEYNAME_FILENAME, DEF_FILENAME, six.string_types),
+        (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)))
 
     for key, default, vtypes in fillup:
         if key not in mycfg or not isinstance(mycfg[key], vtypes):
@@ -156,7 +157,8 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir):
             filename = os.path.join(cfg_dir, filename)
 
         # Truncate filename first time you see it
-        omode = "ab" if filename not in files:
+        omode = "ab"
+        if filename not in files:
             omode = "wb"
             files.append(filename)
 
@@ -170,17 +172,6 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir):
 
 
 def handle(name, cfg, cloud, log, _args):
-    # rsyslog:
-    #  configs:
-    #   - "*.* @@192.158.1.1"
-    #   - content: "*.*   @@192.0.2.1:10514"
-    #   - filename: 01-examplecom.conf
-    #     content: |
-    #       *.*   @@syslogd.example.com
-    #  config_dir: DEF_DIR
-    #  config_filename: DEF_FILENAME
-    #  service_reload: "auto"
-
     if 'rsyslog' not in cfg:
         log.debug(("Skipping module named %s,"
                    " no 'rsyslog' key in configuration"), name)
diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/test_handler/test_handler_rsyslog.py
new file mode 100644
index 00000000..3501ff95
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_rsyslog.py
@@ -0,0 +1,113 @@
+import os
+import shutil
+import tempfile
+
+from cloudinit.config.cc_rsyslog import (
+    load_config, DEF_FILENAME, DEF_DIR, DEF_RELOAD, apply_rsyslog_changes)
+from cloudinit import util
+
+from .. import helpers as t_help
+
+
+class TestLoadConfig(t_help.TestCase):
+    def setUp(self):
+        super(TestLoadConfig, self).setUp()
+        self.basecfg = {
+            'config_filename': DEF_FILENAME,
+            'config_dir': DEF_DIR,
+            'service_reload_command': DEF_RELOAD,
+            'configs': [],
+        }
+
+    def test_legacy_full(self):
+        found = load_config({
+            'rsyslog': ['*.* @192.168.1.1'],
+            'rsyslog_dir': "mydir",
+            'rsyslog_filename': "myfilename"})
+        expected = {
+            'configs': ['*.* @192.168.1.1'],
+            'config_dir': "mydir",
+            'config_filename': 'myfilename',
+            'service_reload_command': 'auto'}
+        self.assertEqual(found, expected)
+
+    def test_legacy_defaults(self):
+        found = load_config({
+            'rsyslog': ['*.* @192.168.1.1']})
+        self.basecfg.update({
+            'configs': ['*.* @192.168.1.1']})
+        self.assertEqual(found, self.basecfg)
+
+    def test_new_defaults(self):
+        self.assertEqual(load_config({}), self.basecfg)
+
+    def test_new_configs(self):
+        cfgs = ['*.* myhost', '*.* my2host']
+        self.basecfg.update({'configs': cfgs})
+        self.assertEqual(
+            load_config({'rsyslog': {'configs': cfgs}}),
+            self.basecfg)
+
+
+class TestApplyChanges(t_help.TestCase):
+    def setUp(self):
+        self.tmp = tempfile.mkdtemp()
+        self.addCleanup(shutil.rmtree, self.tmp)
+
+    def test_simple(self):
+        cfgline = "*.* foohost"
+        changed = apply_rsyslog_changes(
+            configs=[cfgline], def_fname="foo.cfg", cfg_dir=self.tmp)
+
+        fname = os.path.join(self.tmp, "foo.cfg")
+        self.assertEqual([fname], changed)
+        self.assertEqual(
+            util.load_file(fname), cfgline + "\n")
+
+    def test_multiple_files(self):
+        configs = [
+            '*.* foohost',
+            {'content': 'abc', 'filename': 'my.cfg'},
+            {'content': 'filefoo-content',
+             'filename': os.path.join(self.tmp, 'mydir/mycfg')},
+        ]
+
+        changed = apply_rsyslog_changes(
+            configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
+
+        expected = [
+           (os.path.join(self.tmp, "default.cfg"),
+            "*.* foohost\n"),
+           (os.path.join(self.tmp, "my.cfg"), "abc\n"),
+           (os.path.join(self.tmp, "mydir/mycfg"), "filefoo-content\n"),
+        ]
+        self.assertEqual([f[0] for f in expected], changed)
+        actual = []
+        for fname, _content in expected:
+            util.load_file(fname)
+            actual.append((fname, util.load_file(fname),))
+        self.assertEqual(expected, actual)
+
+    def test_repeat_def(self):
+        configs = ['*.* foohost', "*.warn otherhost"]
+
+        changed = apply_rsyslog_changes(
+            configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
+
+        fname = os.path.join(self.tmp, "default.cfg")
+        self.assertEqual([fname], changed)
+
+        expected_content = '\n'.join([c for c in configs]) + '\n'
+        found_content = util.load_file(fname)
+        self.assertEqual(expected_content, found_content)
+
+    def test_multiline_content(self):
+        configs = ['line1', 'line2\nline3\n']
+
+        changed = apply_rsyslog_changes(
+            configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
+
+        fname = os.path.join(self.tmp, "default.cfg")
+        expected_content = '\n'.join([c for c in configs]) + '\n'
+        found_content = util.load_file(fname)
+        self.assertEqual(expected_content, found_content)
-- 
cgit v1.2.3


From 3a59cf0077f01b82a8b464568bcdd94ce2112b1d Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 27 Jul 2015 20:42:56 -0400
Subject: _read_dmi_syspath: fix bad log message causing unintended exception

---
 ChangeLog         | 1 +
 cloudinit/util.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index 661e968b..bef5f77d 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -56,6 +56,7 @@
  - distro mirrors: provide datasource to mirror selection code to support
    GCE regional mirrors. (LP: #1470890)
  - add udev rules that identify ephemeral device on Azure (LP: #1411582)
+ - _read_dmi_syspath: fix bad log message causing unintended exception
 0.7.6:
  - open 0.7.6
  - Enable vendordata on CloudSigma datasource (LP: #1303986)
diff --git a/cloudinit/util.py b/cloudinit/util.py
index db4e02b8..02ba654a 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2145,7 +2145,7 @@ def _read_dmi_syspath(key):
         return key_data.strip()
 
     except Exception as e:
-        logexc(LOG, "failed read of %s", dmi_key_path, e)
+        logexc(LOG, "failed read of %s", dmi_key_path)
         return None
 
 
-- 
cgit v1.2.3


From 39f25660714e5640be3dce576a6cfdee9a1672c8 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 27 Jul 2015 20:53:44 -0400
Subject: use 'restart' rather than 'reload' on non-systemd systems

Testing on trusty shows that:
  service rsyslog reload
does produce a message like:
  rsyslogd was HUPed
but does not result in the new config being honored.
Using restart does, and with upstart that should be fine (as upstart will
start only if previously running).
---
 cloudinit/config/cc_rsyslog.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index a07200c3..82e382e8 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -102,7 +102,7 @@ def reload_syslog(command=DEF_RELOAD, systemd=False):
         if systemd:
             cmd = ['systemctl', 'reload-or-try-restart', service]
         else:
-            cmd = ['service', service, 'reload']
+            cmd = ['service', service, 'restart']
     else:
         cmd = command
     util.subp(cmd, capture=True)
-- 
cgit v1.2.3


From cae9122f88f8369454b03b97a5386d3135941fd9 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 27 Jul 2015 21:02:51 -0400
Subject: fix kwarg

---
 cloudinit/config/cc_rsyslog.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 82e382e8..9599e925 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -193,7 +193,7 @@ def handle(name, cfg, cloud, log, _args):
 
     try:
         restarted = reload_syslog(
-            service=mycfg[KEYNAME_RELOAD],
+            command=mycfg[KEYNAME_RELOAD],
             systemd=cloud.distro.uses_systemd()),
     except util.ProcessExecutionError as e:
         restarted = False
-- 
cgit v1.2.3


From 6dd505fd02e0933d8770c8932a927940f6a0e025 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 28 Jul 2015 09:27:26 -0400
Subject: add support for 'remotes'

---
 cloudinit/config/cc_rsyslog.py                     | 156 ++++++++++++++++++++-
 cloudinit/config/cc_syslog.py                      |   2 +-
 doc/examples/cloud-config-rsyslog.txt              |  37 +++++
 doc/examples/cloud-config-syslog.txt               |  30 ----
 .../unittests/test_handler/test_handler_rsyslog.py |  38 ++++-
 5 files changed, 227 insertions(+), 36 deletions(-)
 create mode 100644 doc/examples/cloud-config-rsyslog.txt
 delete mode 100644 doc/examples/cloud-config-syslog.txt

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 9599e925..8c02e826 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -37,10 +37,33 @@ Under 'rsyslog' you can define:
     For simply logging to an existing remote syslog server, via udp:
       configs: ["*.* @192.168.1.1"]
 
+  - remotes: [default={}]
+    This is a dictionary of name / value pairs.
+    In comparison to 'configs', it is more focused in that it only supports
+    remote syslog configuration.  It is not rsyslog specific, and could
+    be converted for use with other syslog implementations.
+
+    Each entry in remotes is a 'name' and a 'value'.
+     * name: a string identifying the entry.  good practice would indicate
+       using a consistent and identifiable string for the producer.
+       For example, the MAAS service could use 'maas' as the key.
+     * value consists of the following parts:
+       * optional filter for log messages
+         default if not present: *.*
+       * optional leading '@' or '@@' (indicates udp or tcp respectively).
+         default if not present (udp): @
+         This is rsyslog format for that. if not present, is '@'.
+       * ipv4 or ipv6 or hostname
+         ipv6 addresses must be in [::1] format. (@[fd00::1]:514)
+       * optional port
+         port defaults to 514
+
   - config_filename: [default=20-cloud-config.conf]
     this is the file name to use if none is provided in a config entry.
+
   - config_dir: [default=/etc/rsyslog.d]
     this directory is used for filenames that are not absolute paths.
+
   - service_reload_command: [default="auto"]
     this command is executed if files have been written and thus the syslog
     daemon needs to be told.
@@ -61,9 +84,12 @@ Example config:
     configs:
       - "*.* @@192.158.1.1"
       - content: "*.*   @@192.0.2.1:10514"
-      - filename: 01-examplecom.conf
+        filename: 01-example.conf
       - content: |
         *.*   @@syslogd.example.com
+    remotes:
+      maas: "192.168.1.1"
+      juju: "10.0.4.1"
     config_dir: config_dir
     config_filename: config_filename
     service_reload_command: [your, syslog, restart, command]
@@ -77,6 +103,7 @@ Example Legacy config:
 """
 
 import os
+import re
 import six
 
 from cloudinit import log as logging
@@ -85,6 +112,7 @@ from cloudinit import util
 DEF_FILENAME = "20-cloud-config.conf"
 DEF_DIR = "/etc/rsyslog.d"
 DEF_RELOAD = "auto"
+DEF_REMOTES = {}
 
 KEYNAME_CONFIGS = 'configs'
 KEYNAME_FILENAME = 'config_filename'
@@ -92,9 +120,15 @@ KEYNAME_DIR = 'config_dir'
 KEYNAME_RELOAD = 'service_reload_command'
 KEYNAME_LEGACY_FILENAME = 'rsyslog_filename'
 KEYNAME_LEGACY_DIR = 'rsyslog_dir'
+KEYNAME_REMOTES = 'remotes'
 
 LOG = logging.getLogger(__name__)
 
+COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
+HOST_PORT_RE = re.compile(
+    r'^(?P<proto>[@]{0,2})'
+    '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
+    '([:](?P<port>[0-9]+))?$')
 
 def reload_syslog(command=DEF_RELOAD, systemd=False):
     service = 'rsyslog'
@@ -124,7 +158,8 @@ def load_config(cfg):
         (KEYNAME_CONFIGS, [], list),
         (KEYNAME_DIR, DEF_DIR, six.string_types),
         (KEYNAME_FILENAME, DEF_FILENAME, six.string_types),
-        (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)))
+        (KEYNAME_RELOAD, DEF_RELOAD, six.string_types + (list,)),
+        (KEYNAME_REMOTES, DEF_REMOTES, dict))
 
     for key, default, vtypes in fillup:
         if key not in mycfg or not isinstance(mycfg[key], vtypes):
@@ -171,6 +206,113 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir):
     return files
 
 
+def parse_remotes_line(line, name=None):
+    try:
+        data, comment = COMMENT_RE.split(line)
+        comment = comment.strip()
+    except ValueError:
+        data, comment = (line, None)
+
+    toks = data.strip().split()
+    match = None
+    if len(toks) == 1:
+        host_port = data
+    elif len(toks) == 2:
+        match, host_port = toks
+    else:
+        raise ValueError("line had multiple spaces: %s" % data)
+
+    toks = HOST_PORT_RE.match(host_port)
+
+    if not toks:
+        raise ValueError("Invalid host specification '%s'" % host_port)
+
+    proto = toks.group('proto')
+    addr = toks.group('addr') or toks.group('bracket_addr')
+    port = toks.group('port')
+
+    if addr.startswith("[") and not addr.endswith("]"):
+        raise ValueError("host spec had invalid brackets: %s" % addr)
+
+    if comment and not name:
+        name = comment
+
+    t = SyslogRemotesLine(name=name, match=match, proto=proto,
+                          addr=addr, port=port)
+    t.validate()
+    return t
+
+
+class SyslogRemotesLine(object):
+    def __init__(self, name=None, match=None, proto=None, addr=None,
+                 port=None):
+        if not match:
+            match = "*.*"
+        self.name = name
+        self.match = match
+        if proto == "@":
+            proto = "udp"
+        elif proto == "@@":
+            proto = "tcp"
+        self.proto = proto
+
+        self.addr = addr
+        if port:
+            self.port = int(port)
+        else:
+            self.port = None
+
+    def validate(self):
+        if self.port:
+            try:
+                int(self.port)
+            except ValueError:
+                raise ValueError("port '%s' is not an integer" % self.port)
+
+        if not self.addr:
+            raise ValueError("address is required")
+
+    def __repr__(self):
+        return "[name=%s match=%s proto=%s address=%s port=%s]" % (
+            self.name, self.match, self.proto, self.addr, self.port
+        )
+
+    def __str__(self):
+        buf = self.match + " "
+        if self.proto == "udp":
+            buf += " @"
+        elif self.proto == "tcp":
+            buf += " @@"
+
+        if ":" in self.addr:
+            buf += "[" + self.addr + "]"
+        else:
+            buf += self.addr
+
+        if self.port:
+            buf += ":%s" % self.port
+
+        if self.name:
+            buf += " # %s" % self.name
+        return buf
+
+
+def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
+    if not remotes:
+        return None
+    lines = []
+    if header is not None:
+        lines.append(header)
+    for name, line in remotes.items():
+        try:
+            lines.append(parse_remotes_line(line, name=name))
+        except ValueError as e:
+            LOG.warn("failed loading remote %s: %s [%s]", name, line, e)
+    if footer is not None:
+        lines.append(footer)
+    return '\n'.join(str(lines)) + '\n'
+
+
 def handle(name, cfg, cloud, log, _args):
     if 'rsyslog' not in cfg:
         log.debug(("Skipping module named %s,"
@@ -178,6 +320,16 @@ def handle(name, cfg, cloud, log, _args):
         return
 
     mycfg = load_config(cfg)
+    configs = mycfg[KEYNAME_CONFIGS]
+
+    if mycfg[KEYNAME_REMOTES]:
+        configs.append(
+            remotes_to_rsyslog_cfg(
+                mycfg[KEYNAME_REMOTES],
+                header="# begin remotes",
+                footer="# end remotes",
+            ))
+
     if not mycfg['configs']:
         log.debug("Empty config rsyslog['configs'], nothing to do")
         return
diff --git a/cloudinit/config/cc_syslog.py b/cloudinit/config/cc_syslog.py
index 21a8e8a9..27793f8b 100644
--- a/cloudinit/config/cc_syslog.py
+++ b/cloudinit/config/cc_syslog.py
@@ -168,7 +168,7 @@ def handle(name, cfg, cloud, log, args):
         LOG.debug("syslog/remotes_file empty, doing nothing")
         return
 
-    remotes = mycfg.get('remotes_dict', {})
+    remotes = mycfg.get('remotes', {})
     if remotes and not isinstance(remotes, dict):
         LOG.warn("syslog/remotes: content is not a dictionary")
         return
diff --git a/doc/examples/cloud-config-rsyslog.txt b/doc/examples/cloud-config-rsyslog.txt
new file mode 100644
index 00000000..ff60e3a8
--- /dev/null
+++ b/doc/examples/cloud-config-rsyslog.txt
@@ -0,0 +1,37 @@
+## the rsyslog module allows you to configure the system's syslog.
+## configuration of syslog is under the top level cloud-config
+## entry 'rsyslog'.
+##
+## Example:
+#cloud-config
+rsyslog:
+ remotes:
+  # udp to host 'maas.mydomain' port 514
+  maashost: maas.mydomain
+  # udp to ipv4 host on port 514
+  maas: "@[10.5.1.56]:514"
+  # tcp to host ipv6 host on port 555
+  maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555"
+ configs:
+   - "*.* @@192.158.1.1"
+   - content: "*.*   @@192.0.2.1:10514"
+     filename: 01-example.conf
+   - content: |
+     *.*   @@syslogd.example.com
+ config_dir: /etc/rsyslog.d
+ config_filename: 20-cloud-config.conf
+ service_reload_command: [your, syslog, reload, command]
+
+## Additionally the following legacy format is supported
+## it is converted into the format above before use.
+##  rsyslog_filename -> rsyslog/config_filename
+##  rsyslog_dir -> rsyslog/config_dir
+##  rsyslog -> rsyslog/configs
+# rsyslog:
+#  - "*.* @@192.158.1.1"
+#  - content: "*.*   @@192.0.2.1:10514"
+#    filename: 01-example.conf
+#  - content: |
+#    *.*   @@syslogd.example.com
+# rsyslog_filename: 20-cloud-config.conf
+# rsyslog_dir: /etc/rsyslog.d
diff --git a/doc/examples/cloud-config-syslog.txt b/doc/examples/cloud-config-syslog.txt
deleted file mode 100644
index 9ec5e120..00000000
--- a/doc/examples/cloud-config-syslog.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-## syslog module allows you to configure the systems syslog.
-## configuration of syslog is under the top level cloud-config
-## entry 'syslog'.
-##
-## "remotes"
-## remotes is a dictionary. items are of 'name: remote_info'
-## name is simply a name (example 'maas'). It has no importance other than
-## for cloud-init merging configs
-##
-## remote_info is of the format
-## * optional filter for log messages
-## default if not present: *.*
-## * optional leading '@' or '@@' (indicates udp or tcp).
-## default if not present (udp): @
-## This is rsyslog format for that. if not present, is '@' which is udp
-## * ipv4 or ipv6 or hostname
-## ipv6 addresses must be encoded in [::1] format. example: @[fd00::1]:514
-## * optional port
-## port defaults to 514
-##
-## Example:
-#cloud-config
-syslog:
- remotes:
-  # udp to host 'maas.mydomain' port 514
-  maashost: maas.mydomain
-  # udp to ipv4 host on port 514
-  maas: "@[10.5.1.56]:514"
-  # tcp to host ipv6 host on port 555
-  maasipv6: "*.* @@[FE80::0202:B3FF:FE1E:8329]:555"
diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/test_handler/test_handler_rsyslog.py
index 3501ff95..0bace685 100644
--- a/tests/unittests/test_handler/test_handler_rsyslog.py
+++ b/tests/unittests/test_handler/test_handler_rsyslog.py
@@ -3,7 +3,8 @@ import shutil
 import tempfile
 
 from cloudinit.config.cc_rsyslog import (
-    load_config, DEF_FILENAME, DEF_DIR, DEF_RELOAD, apply_rsyslog_changes)
+    apply_rsyslog_changes, DEF_DIR, DEF_FILENAME, DEF_RELOAD, load_config,
+    parse_remotes_line)
 from cloudinit import util
 
 from .. import helpers as t_help
@@ -17,6 +18,7 @@ class TestLoadConfig(t_help.TestCase):
             'config_dir': DEF_DIR,
             'service_reload_command': DEF_RELOAD,
             'configs': [],
+            'remotes': {},
         }
 
     def test_legacy_full(self):
@@ -24,12 +26,14 @@ class TestLoadConfig(t_help.TestCase):
             'rsyslog': ['*.* @192.168.1.1'],
             'rsyslog_dir': "mydir",
             'rsyslog_filename': "myfilename"})
-        expected = {
+        self.basecfg.update({
             'configs': ['*.* @192.168.1.1'],
             'config_dir': "mydir",
             'config_filename': 'myfilename',
             'service_reload_command': 'auto'}
-        self.assertEqual(found, expected)
+            )
+
+        self.assertEqual(found, self.basecfg)
 
     def test_legacy_defaults(self):
         found = load_config({
@@ -111,3 +115,31 @@ class TestApplyChanges(t_help.TestCase):
         expected_content = '\n'.join([c for c in configs]) + '\n'
         found_content = util.load_file(fname)
         self.assertEqual(expected_content, found_content)
+
+
+class TestParseRemotesLine(t_help.TestCase):
+    def test_valid_port(self):
+        r = parse_remotes_line("foo:9")
+        self.assertEqual(9, r.port)
+
+    def test_invalid_port(self):
+        with self.assertRaises(ValueError):
+            parse_remotes_line("*.* foo:abc")
+
+    def test_valid_ipv6(self):
+        r = parse_remotes_line("*.* [::1]")
+        self.assertEqual("*.* [::1]", str(r))
+
+    def test_valid_ipv6_with_port(self):
+        r = parse_remotes_line("*.* [::1]:100")
+        self.assertEqual(r.port, 100)
+        self.assertEqual(r.addr, "::1")
+        self.assertEqual("*.* [::1]:100", str(r))
+
+    def test_invalid_multiple_colon(self):
+        with self.assertRaises(ValueError):
+            parse_remotes_line("*.* ::1:100")
+
+    def test_name_in_string(self):
+        r = parse_remotes_line("syslog.host", name="foobar")
+        self.assertEqual("*.* syslog.host # foobar", str(r))
-- 
cgit v1.2.3


From f61a62434b36ab873b2b82a5ba69eda826755bfc Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 28 Jul 2015 10:12:02 -0400
Subject: fix bug in remotes_to_rsyslog_cfg, add test

---
 cloudinit/config/cc_rsyslog.py                     |  4 +--
 .../unittests/test_handler/test_handler_rsyslog.py | 32 ++++++++++++++++++----
 2 files changed, 28 insertions(+), 8 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 8c02e826..915ab420 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -305,12 +305,12 @@ def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
         lines.append(header)
     for name, line in remotes.items():
         try:
-            lines.append(parse_remotes_line(line, name=name))
+            lines.append(str(parse_remotes_line(line, name=name)))
         except ValueError as e:
             LOG.warn("failed loading remote %s: %s [%s]", name, line, e)
     if footer is not None:
         lines.append(footer)
-    return '\n'.join(str(lines)) + '\n'
+    return '\n'.join(lines) + "\n"
 
 
 def handle(name, cfg, cloud, log, _args):
diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/test_handler/test_handler_rsyslog.py
index 0bace685..292559c5 100644
--- a/tests/unittests/test_handler/test_handler_rsyslog.py
+++ b/tests/unittests/test_handler/test_handler_rsyslog.py
@@ -4,7 +4,7 @@ import tempfile
 
 from cloudinit.config.cc_rsyslog import (
     apply_rsyslog_changes, DEF_DIR, DEF_FILENAME, DEF_RELOAD, load_config,
-    parse_remotes_line)
+    parse_remotes_line, remotes_to_rsyslog_cfg)
 from cloudinit import util
 
 from .. import helpers as t_help
@@ -80,10 +80,10 @@ class TestApplyChanges(t_help.TestCase):
             configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
 
         expected = [
-           (os.path.join(self.tmp, "default.cfg"),
-            "*.* foohost\n"),
-           (os.path.join(self.tmp, "my.cfg"), "abc\n"),
-           (os.path.join(self.tmp, "mydir/mycfg"), "filefoo-content\n"),
+            (os.path.join(self.tmp, "default.cfg"),
+             "*.* foohost\n"),
+            (os.path.join(self.tmp, "my.cfg"), "abc\n"),
+            (os.path.join(self.tmp, "mydir/mycfg"), "filefoo-content\n"),
         ]
         self.assertEqual([f[0] for f in expected], changed)
         actual = []
@@ -108,7 +108,7 @@ class TestApplyChanges(t_help.TestCase):
     def test_multiline_content(self):
         configs = ['line1', 'line2\nline3\n']
 
-        changed = apply_rsyslog_changes(
+        apply_rsyslog_changes(
             configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
 
         fname = os.path.join(self.tmp, "default.cfg")
@@ -143,3 +143,23 @@ class TestParseRemotesLine(t_help.TestCase):
     def test_name_in_string(self):
         r = parse_remotes_line("syslog.host", name="foobar")
         self.assertEqual("*.* syslog.host # foobar", str(r))
+
+
+class TestRemotesToSyslog(t_help.TestCase):
+    def test_simple(self):
+        # str rendered line must appear in remotes_to_rsyslog_cfg return
+        mycfg = "*.* myhost"
+        myline = str(parse_remotes_line(mycfg, name="myname"))
+        r = remotes_to_rsyslog_cfg({'myname': mycfg})
+        lines = r.splitlines()
+        self.assertEqual(1, len(lines))
+        self.assertTrue(myline in r.splitlines())
+
+    def test_header_footer(self):
+        header = "#foo head"
+        footer = "#foo foot"
+        r = remotes_to_rsyslog_cfg(
+            {'myname': "*.* myhost"}, header=header, footer=footer)
+        lines = r.splitlines()
+        self.assertTrue(header, lines[0])
+        self.assertTrue(footer, lines[-1])
-- 
cgit v1.2.3


From c6e7fb1752a93ed534080adf0588e4c7cdd99071 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 28 Jul 2015 10:34:31 -0400
Subject: add trailing newline only if necessary

---
 cloudinit/config/cc_rsyslog.py                       | 9 +++++----
 tests/unittests/test_handler/test_handler_rsyslog.py | 2 +-
 2 files changed, 6 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 915ab420..2bb00728 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -188,8 +188,7 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir):
             LOG.warn("Entry %s has an empty filename", cur_pos + 1)
             continue
 
-        if not filename.startswith("/"):
-            filename = os.path.join(cfg_dir, filename)
+        filename = os.path.join(cfg_dir, filename)
 
         # Truncate filename first time you see it
         omode = "ab"
@@ -198,8 +197,10 @@ def apply_rsyslog_changes(configs, def_fname, cfg_dir):
             files.append(filename)
 
         try:
-            contents = "%s\n" % (content)
-            util.write_file(filename, contents, omode=omode)
+            endl = ""
+            if not content.endswith("\n"):
+                endl = "\n"
+            util.write_file(filename, content + endl, omode=omode)
         except Exception:
             util.logexc(LOG, "Failed to write to %s", filename)
 
diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/test_handler/test_handler_rsyslog.py
index 292559c5..e7666615 100644
--- a/tests/unittests/test_handler/test_handler_rsyslog.py
+++ b/tests/unittests/test_handler/test_handler_rsyslog.py
@@ -112,7 +112,7 @@ class TestApplyChanges(t_help.TestCase):
             configs=configs, def_fname="default.cfg", cfg_dir=self.tmp)
 
         fname = os.path.join(self.tmp, "default.cfg")
-        expected_content = '\n'.join([c for c in configs]) + '\n'
+        expected_content = '\n'.join([c for c in configs])
         found_content = util.load_file(fname)
         self.assertEqual(expected_content, found_content)
 
-- 
cgit v1.2.3


From 0a581732a40ff814b1fc0dace9f519b7a5c779e6 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 28 Jul 2015 10:48:32 -0400
Subject: must declare proto of '@'

---
 cloudinit/config/cc_rsyslog.py                       | 6 ++++--
 tests/unittests/test_handler/test_handler_rsyslog.py | 6 +++---
 2 files changed, 7 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 2bb00728..5ecf1629 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -251,6 +251,8 @@ class SyslogRemotesLine(object):
             match = "*.*"
         self.name = name
         self.match = match
+        if not proto:
+            proto = "udp"
         if proto == "@":
             proto = "udp"
         elif proto == "@@":
@@ -281,9 +283,9 @@ class SyslogRemotesLine(object):
     def __str__(self):
         buf = self.match + " "
         if self.proto == "udp":
-            buf += " @"
+            buf += "@"
         elif self.proto == "tcp":
-            buf += " @@"
+            buf += "@@"
 
         if ":" in self.addr:
             buf += "[" + self.addr + "]"
diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/test_handler/test_handler_rsyslog.py
index e7666615..7bfa65a9 100644
--- a/tests/unittests/test_handler/test_handler_rsyslog.py
+++ b/tests/unittests/test_handler/test_handler_rsyslog.py
@@ -128,13 +128,13 @@ class TestParseRemotesLine(t_help.TestCase):
 
     def test_valid_ipv6(self):
         r = parse_remotes_line("*.* [::1]")
-        self.assertEqual("*.* [::1]", str(r))
+        self.assertEqual("*.* @[::1]", str(r))
 
     def test_valid_ipv6_with_port(self):
         r = parse_remotes_line("*.* [::1]:100")
         self.assertEqual(r.port, 100)
         self.assertEqual(r.addr, "::1")
-        self.assertEqual("*.* [::1]:100", str(r))
+        self.assertEqual("*.* @[::1]:100", str(r))
 
     def test_invalid_multiple_colon(self):
         with self.assertRaises(ValueError):
@@ -142,7 +142,7 @@ class TestParseRemotesLine(t_help.TestCase):
 
     def test_name_in_string(self):
         r = parse_remotes_line("syslog.host", name="foobar")
-        self.assertEqual("*.* syslog.host # foobar", str(r))
+        self.assertEqual("*.* @syslog.host # foobar", str(r))
 
 
 class TestRemotesToSyslog(t_help.TestCase):
-- 
cgit v1.2.3


From d5f93dbd908c349548554cb69ca3afd05077cf57 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 28 Jul 2015 10:49:48 -0400
Subject: remove 'syslog' module (its been moved to rsyslog)

---
 cloudinit/config/cc_syslog.py                      | 183 ---------------------
 .../unittests/test_handler/test_handler_syslog.py  |  32 ----
 2 files changed, 215 deletions(-)
 delete mode 100644 cloudinit/config/cc_syslog.py
 delete mode 100644 tests/unittests/test_handler/test_handler_syslog.py

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_syslog.py b/cloudinit/config/cc_syslog.py
deleted file mode 100644
index 27793f8b..00000000
--- a/cloudinit/config/cc_syslog.py
+++ /dev/null
@@ -1,183 +0,0 @@
-# vi: ts=4 expandtab
-#
-#    Copyright (C) 2015 Canonical Ltd.
-#
-#    Author: Scott Moser <scott.moser@canonical.com>
-#
-#    This program is free software: you can redistribute it and/or modify
-#    it under the terms of the GNU General Public License version 3, as
-#    published by the Free Software Foundation.
-#
-#    This program is distributed in the hope that it will be useful,
-#    but WITHOUT ANY WARRANTY; without even the implied warranty of
-#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-#    GNU General Public License for more details.
-#
-#    You should have received a copy of the GNU General Public License
-#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
-
-from cloudinit import log as logging
-from cloudinit import util
-from cloudinit.settings import PER_INSTANCE
-
-import re
-
-LOG = logging.getLogger(__name__)
-
-frequency = PER_INSTANCE
-
-BUILTIN_CFG = {
-    'remotes_file': '/etc/rsyslog.d/20-cloudinit-remotes.conf',
-    'remotes': {},
-    'service_name': 'rsyslog',
-}
-
-COMMENT_RE = re.compile(r'[ ]*[#]+[ ]*')
-HOST_PORT_RE = re.compile(
-    r'^(?P<proto>[@]{0,2})'
-    '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
-    '([:](?P<port>[0-9]+))?$')
-
-
-def parse_remotes_line(line, name=None):
-    try:
-        data, comment = COMMENT_RE.split(line)
-        comment = comment.strip()
-    except ValueError:
-        data, comment = (line, None)
-
-    toks = data.strip().split()
-    match = None
-    if len(toks) == 1:
-        host_port = data
-    elif len(toks) == 2:
-        match, host_port = toks
-    else:
-        raise ValueError("line had multiple spaces: %s" % data)
-
-    toks = HOST_PORT_RE.match(host_port)
-
-    if not toks:
-        raise ValueError("Invalid host specification '%s'" % host_port)
-
-    proto = toks.group('proto')
-    addr = toks.group('addr') or toks.group('bracket_addr')
-    port = toks.group('port')
-    print("host_port: %s" % addr)
-    print("port: %s" % port)
-
-    if addr.startswith("[") and not addr.endswith("]"):
-        raise ValueError("host spec had invalid brackets: %s" % addr)
-
-    if comment and not name:
-        name = comment
-
-    t = SyslogRemotesLine(name=name, match=match, proto=proto,
-                          addr=addr, port=port)
-    t.validate()
-    return t
-
-
-class SyslogRemotesLine(object):
-    def __init__(self, name=None, match=None, proto=None, addr=None,
-                 port=None):
-        if not match:
-            match = "*.*"
-        self.name = name
-        self.match = match
-        if proto == "@":
-            proto = "udp"
-        elif proto == "@@":
-            proto = "tcp"
-        self.proto = proto
-
-        self.addr = addr
-        if port:
-            self.port = int(port)
-        else:
-            self.port = None
-
-    def validate(self):
-        if self.port:
-            try:
-                int(self.port)
-            except ValueError:
-                raise ValueError("port '%s' is not an integer" % self.port)
-
-        if not self.addr:
-            raise ValueError("address is required")
-
-    def __repr__(self):
-        return "[name=%s match=%s proto=%s address=%s port=%s]" % (
-            self.name, self.match, self.proto, self.addr, self.port
-        )
-
-    def __str__(self):
-        buf = self.match + " "
-        if self.proto == "udp":
-            buf += " @"
-        elif self.proto == "tcp":
-            buf += " @@"
-
-        if ":" in self.addr:
-            buf += "[" + self.addr + "]"
-        else:
-            buf += self.addr
-
-        if self.port:
-            buf += ":%s" % self.port
-
-        if self.name:
-            buf += " # %s" % self.name
-        return buf
-
-
-def remotes_to_rsyslog_cfg(remotes, header=None):
-    if not remotes:
-        return None
-    lines = []
-    if header is not None:
-        lines.append(header)
-    for name, line in remotes.items():
-        try:
-            lines.append(parse_remotes_line(line, name=name))
-        except ValueError as e:
-            LOG.warn("failed loading remote %s: %s [%s]", name, line, e)
-    return '\n'.join(str(lines)) + '\n'
-
-
-def reload_syslog(systemd, service='rsyslog'):
-    if systemd:
-        cmd = ['systemctl', 'reload-or-try-restart', service]
-    else:
-        cmd = ['service', service, 'reload']
-    try:
-        util.subp(cmd, capture=True)
-    except util.ProcessExecutionError as e:
-        LOG.warn("Failed to reload syslog using '%s': %s", ' '.join(cmd), e)
-
-
-def handle(name, cfg, cloud, log, args):
-    cfgin = cfg.get('syslog')
-    if not cfgin:
-        cfgin = {}
-    mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
-
-    remotes_file = mycfg.get('remotes_file')
-    if util.is_false(remotes_file):
-        LOG.debug("syslog/remotes_file empty, doing nothing")
-        return
-
-    remotes = mycfg.get('remotes', {})
-    if remotes and not isinstance(remotes, dict):
-        LOG.warn("syslog/remotes: content is not a dictionary")
-        return
-
-    config_data = remotes_to_rsyslog_cfg(
-        remotes, header="#cloud-init syslog module")
-
-    util.write_file(remotes_file, config_data)
-
-    reload_syslog(
-        systemd=cloud.distro.uses_systemd(),
-        service=mycfg.get('service_name'))
diff --git a/tests/unittests/test_handler/test_handler_syslog.py b/tests/unittests/test_handler/test_handler_syslog.py
deleted file mode 100644
index bbfd521e..00000000
--- a/tests/unittests/test_handler/test_handler_syslog.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from cloudinit.config.cc_syslog import (
-    parse_remotes_line, SyslogRemotesLine, remotes_to_rsyslog_cfg)
-from cloudinit import util
-from .. import helpers as t_help
-
-
-class TestParseRemotesLine(t_help.TestCase):
-    def test_valid_port(self):
-        r = parse_remotes_line("foo:9")
-        self.assertEqual(9, r.port)
-
-    def test_invalid_port(self):
-        with self.assertRaises(ValueError):
-            parse_remotes_line("*.* foo:abc")
-
-    def test_valid_ipv6(self):
-        r = parse_remotes_line("*.* [::1]")
-        self.assertEqual("*.* [::1]", str(r))
-
-    def test_valid_ipv6_with_port(self):
-        r = parse_remotes_line("*.* [::1]:100")
-        self.assertEqual(r.port, 100)
-        self.assertEqual(r.addr, "::1")
-        self.assertEqual("*.* [::1]:100", str(r))
-
-    def test_invalid_multiple_colon(self):
-        with self.assertRaises(ValueError):
-            parse_remotes_line("*.* ::1:100")
-
-    def test_name_in_string(self):
-        r = parse_remotes_line("syslog.host", name="foobar")
-        self.assertEqual("*.* syslog.host # foobar", str(r))
-- 
cgit v1.2.3


From 55472eb02eaa5b88676a96e006f6838020f8ffe3 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 28 Jul 2015 11:44:32 -0400
Subject: rsyslog: skip empty or None in remotes format

This allows a user to specify the following to overwrite a previously
declared entry without warnings.
 rsyslog: {'remotes': {'foo': None}}
---
 cloudinit/config/cc_rsyslog.py                       | 2 ++
 tests/unittests/test_handler/test_handler_rsyslog.py | 9 +++++++++
 2 files changed, 11 insertions(+)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index 5ecf1629..a0132d28 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -307,6 +307,8 @@ def remotes_to_rsyslog_cfg(remotes, header=None, footer=None):
     if header is not None:
         lines.append(header)
     for name, line in remotes.items():
+        if not line:
+            continue
         try:
             lines.append(str(parse_remotes_line(line, name=name)))
         except ValueError as e:
diff --git a/tests/unittests/test_handler/test_handler_rsyslog.py b/tests/unittests/test_handler/test_handler_rsyslog.py
index 7bfa65a9..b932165c 100644
--- a/tests/unittests/test_handler/test_handler_rsyslog.py
+++ b/tests/unittests/test_handler/test_handler_rsyslog.py
@@ -163,3 +163,12 @@ class TestRemotesToSyslog(t_help.TestCase):
         lines = r.splitlines()
         self.assertTrue(header, lines[0])
         self.assertTrue(footer, lines[-1])
+
+    def test_with_empty_or_null(self):
+        mycfg = "*.* myhost"
+        myline = str(parse_remotes_line(mycfg, name="myname"))
+        r = remotes_to_rsyslog_cfg(
+            {'myname': mycfg, 'removed': None, 'removed2': ""})
+        lines = r.splitlines()
+        self.assertEqual(1, len(lines))
+        self.assertTrue(myline in r.splitlines())
-- 
cgit v1.2.3


From c33b3becebfa7bf3f6e2ee67ea7bc3def6feeb8c Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 28 Jul 2015 16:15:10 -0400
Subject: pull from 2.0 trunk @ a433358bbcf4e8a771b80cae34468409ed5a811d

---
 cloudinit/registry.py             |  23 +++++
 cloudinit/reporting.py            | 122 ++++++++++++++++++++++++
 tests/unittests/test_registry.py  |  28 ++++++
 tests/unittests/test_reporting.py | 192 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 365 insertions(+)
 create mode 100644 cloudinit/registry.py
 create mode 100644 cloudinit/reporting.py
 create mode 100644 tests/unittests/test_registry.py
 create mode 100644 tests/unittests/test_reporting.py

(limited to 'cloudinit')

diff --git a/cloudinit/registry.py b/cloudinit/registry.py
new file mode 100644
index 00000000..46cf0585
--- /dev/null
+++ b/cloudinit/registry.py
@@ -0,0 +1,23 @@
+import copy
+
+
+class DictRegistry(object):
+    """A simple registry for a mapping of objects."""
+
+    def __init__(self):
+        self._items = {}
+
+    def register_item(self, key, item):
+        """Add item to the registry."""
+        if key in self._items:
+            raise ValueError(
+                'Item already registered with key {0}'.format(key))
+        self._items[key] = item
+
+    @property
+    def registered_items(self):
+        """All the items that have been registered.
+
+        This cannot be used to modify the contents of the registry.
+        """
+        return copy.copy(self._items)
diff --git a/cloudinit/reporting.py b/cloudinit/reporting.py
new file mode 100644
index 00000000..d2dd4fec
--- /dev/null
+++ b/cloudinit/reporting.py
@@ -0,0 +1,122 @@
+# Copyright 2015 Canonical Ltd.
+# This file is part of cloud-init.  See LICENCE file for license information.
+#
+# vi: ts=4 expandtab
+"""
+cloud-init reporting framework
+
+The reporting framework is intended to allow all parts of cloud-init to
+report events in a structured manner.
+"""
+
+import abc
+import logging
+
+from cloudinit.registry import DictRegistry
+
+
+FINISH_EVENT_TYPE = 'finish'
+START_EVENT_TYPE = 'start'
+
+DEFAULT_CONFIG = {
+    'logging': {'type': 'log'},
+}
+
+
+instantiated_handler_registry = DictRegistry()
+available_handlers = DictRegistry()
+
+
+class ReportingEvent(object):
+    """Encapsulation of event formatting."""
+
+    def __init__(self, event_type, name, description):
+        self.event_type = event_type
+        self.name = name
+        self.description = description
+
+    def as_string(self):
+        """The event represented as a string."""
+        return '{0}: {1}: {2}'.format(
+            self.event_type, self.name, self.description)
+
+
+class FinishReportingEvent(ReportingEvent):
+
+    def __init__(self, name, description, successful=None):
+        super(FinishReportingEvent, self).__init__(
+            FINISH_EVENT_TYPE, name, description)
+        self.successful = successful
+
+    def as_string(self):
+        if self.successful is None:
+            return super(FinishReportingEvent, self).as_string()
+        success_string = 'success' if self.successful else 'fail'
+        return '{0}: {1}: {2}: {3}'.format(
+            self.event_type, self.name, success_string, self.description)
+
+
+class ReportingHandler(object):
+
+    @abc.abstractmethod
+    def publish_event(self, event):
+        raise NotImplementedError
+
+
+class LogHandler(ReportingHandler):
+    """Publishes events to the cloud-init log at the ``INFO`` log level."""
+
+    def publish_event(self, event):
+        """Publish an event to the ``INFO`` log level."""
+        logger = logging.getLogger(
+            '.'.join([__name__, event.event_type, event.name]))
+        logger.info(event.as_string())
+
+
+def add_configuration(config):
+    for handler_name, handler_config in config.items():
+        handler_config = handler_config.copy()
+        cls = available_handlers.registered_items[handler_config.pop('type')]
+        instance = cls(**handler_config)
+        instantiated_handler_registry.register_item(handler_name, instance)
+
+
+def report_event(event):
+    """Report an event to all registered event handlers.
+
+    This should generally be called via one of the other functions in
+    the reporting module.
+
+    :param event_type:
+        The type of the event; this should be a constant from the
+        reporting module.
+    """
+    for _, handler in instantiated_handler_registry.registered_items.items():
+        handler.publish_event(event)
+
+
+def report_finish_event(event_name, event_description, successful=None):
+    """Report a "finish" event.
+
+    See :py:func:`.report_event` for parameter details.
+    """
+    event = FinishReportingEvent(event_name, event_description, successful)
+    return report_event(event)
+
+
+def report_start_event(event_name, event_description):
+    """Report a "start" event.
+
+    :param event_name:
+        The name of the event; this should be a topic which events would
+        share (e.g. it will be the same for start and finish events).
+
+    :param event_description:
+        A human-readable description of the event that has occurred.
+    """
+    event = ReportingEvent(START_EVENT_TYPE, event_name, event_description)
+    return report_event(event)
+
+
+available_handlers.register_item('log', LogHandler)
+add_configuration(DEFAULT_CONFIG)
diff --git a/tests/unittests/test_registry.py b/tests/unittests/test_registry.py
new file mode 100644
index 00000000..bcf01475
--- /dev/null
+++ b/tests/unittests/test_registry.py
@@ -0,0 +1,28 @@
+from cloudinit.registry import DictRegistry
+
+from .helpers import (mock, TestCase)
+
+
+class TestDictRegistry(TestCase):
+
+    def test_added_item_included_in_output(self):
+        registry = DictRegistry()
+        item_key, item_to_register = 'test_key', mock.Mock()
+        registry.register_item(item_key, item_to_register)
+        self.assertEqual({item_key: item_to_register},
+                         registry.registered_items)
+
+    def test_registry_starts_out_empty(self):
+        self.assertEqual({}, DictRegistry().registered_items)
+
+    def test_modifying_registered_items_isnt_exposed_to_other_callers(self):
+        registry = DictRegistry()
+        registry.registered_items['test_item'] = mock.Mock()
+        self.assertEqual({}, registry.registered_items)
+
+    def test_keys_cannot_be_replaced(self):
+        registry = DictRegistry()
+        item_key = 'test_key'
+        registry.register_item(item_key, mock.Mock())
+        self.assertRaises(ValueError,
+                          registry.register_item, item_key, mock.Mock())
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
new file mode 100644
index 00000000..f4011a79
--- /dev/null
+++ b/tests/unittests/test_reporting.py
@@ -0,0 +1,192 @@
+# Copyright 2015 Canonical Ltd.
+# This file is part of cloud-init.  See LICENCE file for license information.
+#
+# vi: ts=4 expandtab
+
+from cloudinit import reporting
+
+from .helpers import (mock, TestCase)
+
+
+def _fake_registry():
+    return mock.Mock(registered_items={'a': mock.MagicMock(),
+                                       'b': mock.MagicMock()})
+
+
+class TestReportStartEvent(TestCase):
+
+    @mock.patch('cloudinit.reporting.instantiated_handler_registry',
+                new_callable=_fake_registry)
+    def test_report_start_event_passes_something_with_as_string_to_handlers(
+            self, instantiated_handler_registry):
+        event_name, event_description = 'my_test_event', 'my description'
+        reporting.report_start_event(event_name, event_description)
+        expected_string_representation = ': '.join(
+            ['start', event_name, event_description])
+        for _, handler in (
+                instantiated_handler_registry.registered_items.items()):
+            self.assertEqual(1, handler.publish_event.call_count)
+            event = handler.publish_event.call_args[0][0]
+            self.assertEqual(expected_string_representation, event.as_string())
+
+
+class TestReportFinishEvent(TestCase):
+
+    def _report_finish_event(self, successful=None):
+        event_name, event_description = 'my_test_event', 'my description'
+        reporting.report_finish_event(
+            event_name, event_description, successful=successful)
+        return event_name, event_description
+
+    def assertHandlersPassedObjectWithAsString(
+            self, handlers, expected_as_string):
+        for _, handler in handlers.items():
+            self.assertEqual(1, handler.publish_event.call_count)
+            event = handler.publish_event.call_args[0][0]
+            self.assertEqual(expected_as_string, event.as_string())
+
+    @mock.patch('cloudinit.reporting.instantiated_handler_registry',
+                new_callable=_fake_registry)
+    def test_report_finish_event_passes_something_with_as_string_to_handlers(
+            self, instantiated_handler_registry):
+        event_name, event_description = self._report_finish_event()
+        expected_string_representation = ': '.join(
+            ['finish', event_name, event_description])
+        self.assertHandlersPassedObjectWithAsString(
+            instantiated_handler_registry.registered_items,
+            expected_string_representation)
+
+    @mock.patch('cloudinit.reporting.instantiated_handler_registry',
+                new_callable=_fake_registry)
+    def test_reporting_successful_finish_has_sensible_string_repr(
+            self, instantiated_handler_registry):
+        event_name, event_description = self._report_finish_event(
+            successful=True)
+        expected_string_representation = ': '.join(
+            ['finish', event_name, 'success', event_description])
+        self.assertHandlersPassedObjectWithAsString(
+            instantiated_handler_registry.registered_items,
+            expected_string_representation)
+
+    @mock.patch('cloudinit.reporting.instantiated_handler_registry',
+                new_callable=_fake_registry)
+    def test_reporting_unsuccessful_finish_has_sensible_string_repr(
+            self, instantiated_handler_registry):
+        event_name, event_description = self._report_finish_event(
+            successful=False)
+        expected_string_representation = ': '.join(
+            ['finish', event_name, 'fail', event_description])
+        self.assertHandlersPassedObjectWithAsString(
+            instantiated_handler_registry.registered_items,
+            expected_string_representation)
+
+
+class TestReportingEvent(TestCase):
+
+    def test_as_string(self):
+        event_type, name, description = 'test_type', 'test_name', 'test_desc'
+        event = reporting.ReportingEvent(event_type, name, description)
+        expected_string_representation = ': '.join(
+            [event_type, name, description])
+        self.assertEqual(expected_string_representation, event.as_string())
+
+
+class TestReportingHandler(TestCase):
+
+    def test_no_default_publish_event_implementation(self):
+        self.assertRaises(NotImplementedError,
+                          reporting.ReportingHandler().publish_event, None)
+
+
+class TestLogHandler(TestCase):
+
+    @mock.patch.object(reporting.logging, 'getLogger')
+    def test_appropriate_logger_used(self, getLogger):
+        event_type, event_name = 'test_type', 'test_name'
+        event = reporting.ReportingEvent(event_type, event_name, 'description')
+        reporting.LogHandler().publish_event(event)
+        self.assertEqual(
+            [mock.call(
+                'cloudinit.reporting.{0}.{1}'.format(event_type, event_name))],
+            getLogger.call_args_list)
+
+    @mock.patch.object(reporting.logging, 'getLogger')
+    def test_single_log_message_at_info_published(self, getLogger):
+        event = reporting.ReportingEvent('type', 'name', 'description')
+        reporting.LogHandler().publish_event(event)
+        self.assertEqual(1, getLogger.return_value.info.call_count)
+
+    @mock.patch.object(reporting.logging, 'getLogger')
+    def test_log_message_uses_event_as_string(self, getLogger):
+        event = reporting.ReportingEvent('type', 'name', 'description')
+        reporting.LogHandler().publish_event(event)
+        self.assertIn(event.as_string(),
+                      getLogger.return_value.info.call_args[0][0])
+
+
+class TestDefaultRegisteredHandler(TestCase):
+
+    def test_log_handler_registered_by_default(self):
+        registered_items = (
+            reporting.instantiated_handler_registry.registered_items)
+        for _, item in registered_items.items():
+            if isinstance(item, reporting.LogHandler):
+                break
+        else:
+            self.fail('No reporting LogHandler registered by default.')
+
+
+class TestReportingConfiguration(TestCase):
+
+    @mock.patch.object(reporting, 'instantiated_handler_registry')
+    def test_empty_configuration_doesnt_add_handlers(
+            self, instantiated_handler_registry):
+        reporting.add_configuration({})
+        self.assertEqual(
+            0, instantiated_handler_registry.register_item.call_count)
+
+    @mock.patch.object(
+        reporting, 'instantiated_handler_registry', reporting.DictRegistry())
+    @mock.patch.object(reporting, 'available_handlers')
+    def test_looks_up_handler_by_type_and_adds_it(self, available_handlers):
+        handler_type_name = 'test_handler'
+        handler_cls = mock.Mock()
+        available_handlers.registered_items = {handler_type_name: handler_cls}
+        handler_name = 'my_test_handler'
+        reporting.add_configuration(
+            {handler_name: {'type': handler_type_name}})
+        self.assertEqual(
+            {handler_name: handler_cls.return_value},
+            reporting.instantiated_handler_registry.registered_items)
+
+    @mock.patch.object(
+        reporting, 'instantiated_handler_registry', reporting.DictRegistry())
+    @mock.patch.object(reporting, 'available_handlers')
+    def test_uses_non_type_parts_of_config_dict_as_kwargs(
+            self, available_handlers):
+        handler_type_name = 'test_handler'
+        handler_cls = mock.Mock()
+        available_handlers.registered_items = {handler_type_name: handler_cls}
+        extra_kwargs = {'foo': 'bar', 'bar': 'baz'}
+        handler_config = extra_kwargs.copy()
+        handler_config.update({'type': handler_type_name})
+        handler_name = 'my_test_handler'
+        reporting.add_configuration({handler_name: handler_config})
+        self.assertEqual(
+            handler_cls.return_value,
+            reporting.instantiated_handler_registry.registered_items[
+                handler_name])
+        self.assertEqual([mock.call(**extra_kwargs)],
+                         handler_cls.call_args_list)
+
+    @mock.patch.object(
+        reporting, 'instantiated_handler_registry', reporting.DictRegistry())
+    @mock.patch.object(reporting, 'available_handlers')
+    def test_handler_config_not_modified(self, available_handlers):
+        handler_type_name = 'test_handler'
+        handler_cls = mock.Mock()
+        available_handlers.registered_items = {handler_type_name: handler_cls}
+        handler_config = {'type': handler_type_name, 'foo': 'bar'}
+        expected_handler_config = handler_config.copy()
+        reporting.add_configuration({'my_test_handler': handler_config})
+        self.assertEqual(expected_handler_config, handler_config)
-- 
cgit v1.2.3


From b5574a9925b29417a1b351e7b38c54bc7d144dba Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 30 Jul 2015 18:06:01 -0400
Subject: tests pass

---
 bin/cloud-init                    | 28 ++++++++++--
 cloudinit/reporting.py            | 91 +++++++++++++++++++++++++++++++++++----
 cloudinit/sources/__init__.py     | 16 ++++---
 cloudinit/stages.py               | 10 ++++-
 tests/unittests/test_reporting.py | 14 +++---
 5 files changed, 134 insertions(+), 25 deletions(-)

(limited to 'cloudinit')

diff --git a/bin/cloud-init b/bin/cloud-init
index 1d3e7ee3..7f21e49f 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -46,6 +46,7 @@ from cloudinit import sources
 from cloudinit import stages
 from cloudinit import templater
 from cloudinit import util
+from cloudinit import reporting
 from cloudinit import version
 
 from cloudinit.settings import (PER_INSTANCE, PER_ALWAYS, PER_ONCE,
@@ -313,7 +314,7 @@ def main_modules(action_name, args):
     # 5. Run the modules for the given stage name
     # 6. Done!
     w_msg = welcome_format("%s:%s" % (action_name, name))
-    init = stages.Init(ds_deps=[])
+    init = stages.Init(ds_deps=[], reporter=args.reporter)
     # Stage 1
     init.read_cfg(extract_fns(args))
     # Stage 2
@@ -549,6 +550,8 @@ def main():
                               ' found (use at your own risk)'),
                         dest='force',
                         default=False)
+
+    parser.set_defaults(reporter=None)
     subparsers = parser.add_subparsers()
 
     # Each action and its sub-options (if any)
@@ -595,6 +598,9 @@ def main():
                               help=("frequency of the module"),
                               required=False,
                               choices=list(FREQ_SHORT_NAMES.keys()))
+    parser_single.add_argument("--report", action="store_true",
+                               help="enable reporting",
+                               required=False)
     parser_single.add_argument("module_args", nargs="*",
                               metavar='argument',
                               help=('any additional arguments to'
@@ -617,8 +623,24 @@ def main():
     if name in ("modules", "init"):
         functor = status_wrapper
 
-    return util.log_time(logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
-                         get_uptime=True, func=functor, args=(name, args))
+    reporting = True
+    if name == "init":
+        if args.local:
+            rname, rdesc = ("init-local", "searching for local datasources")
+        else:
+            rname, rdesc = ("init-network", "searching for network datasources")
+    elif name == "modules":
+        rname, rdesc = ("modules-%s" % args.mode, "running modules for %s")
+    elif name == "single":
+        rname, rdesc = ("single/%s" % args.name,
+                        "running single module %s" % args.name)
+        reporting = args.report
+
+    reporter = reporting.ReportStack(rname, rdesc, reporting=reporting)
+    with reporter:
+        return util.log_time(
+            logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
+            get_uptime=True, func=functor, args=(name, args))
 
 
 if __name__ == '__main__':
diff --git a/cloudinit/reporting.py b/cloudinit/reporting.py
index d2dd4fec..c925f661 100644
--- a/cloudinit/reporting.py
+++ b/cloudinit/reporting.py
@@ -20,9 +20,18 @@ START_EVENT_TYPE = 'start'
 
 DEFAULT_CONFIG = {
     'logging': {'type': 'log'},
+    'print': {'type': 'print'},
 }
 
 
+class _nameset(set):
+    def __getattr__(self, name):
+        if name in self:
+            return name
+        raise AttributeError
+
+status = _nameset(("SUCCESS", "WARN", "FAIL"))
+
 instantiated_handler_registry = DictRegistry()
 available_handlers = DictRegistry()
 
@@ -43,17 +52,18 @@ class ReportingEvent(object):
 
 class FinishReportingEvent(ReportingEvent):
 
-    def __init__(self, name, description, successful=None):
+    def __init__(self, name, description, result=None):
         super(FinishReportingEvent, self).__init__(
             FINISH_EVENT_TYPE, name, description)
-        self.successful = successful
+        if result is None:
+            result = status.SUCCESS
+        self.result = result
+        if result not in status:
+            raise ValueError("Invalid result: %s" % result)
 
     def as_string(self):
-        if self.successful is None:
-            return super(FinishReportingEvent, self).as_string()
-        success_string = 'success' if self.successful else 'fail'
         return '{0}: {1}: {2}: {3}'.format(
-            self.event_type, self.name, success_string, self.description)
+            self.event_type, self.name, self.result, self.description)
 
 
 class ReportingHandler(object):
@@ -73,6 +83,11 @@ class LogHandler(ReportingHandler):
         logger.info(event.as_string())
 
 
+class PrintHandler(ReportingHandler):
+    def publish_event(self, event):
+        print(event.as_string())
+
+
 def add_configuration(config):
     for handler_name, handler_config in config.items():
         handler_config = handler_config.copy()
@@ -95,12 +110,12 @@ def report_event(event):
         handler.publish_event(event)
 
 
-def report_finish_event(event_name, event_description, successful=None):
+def report_finish_event(event_name, event_description, result):
     """Report a "finish" event.
 
     See :py:func:`.report_event` for parameter details.
     """
-    event = FinishReportingEvent(event_name, event_description, successful)
+    event = FinishReportingEvent(event_name, event_description, result)
     return report_event(event)
 
 
@@ -118,5 +133,65 @@ def report_start_event(event_name, event_description):
     return report_event(event)
 
 
+class ReportStack(object):
+    def __init__(self, name, description, parent=None, reporting=None,
+                 exc_result=None):
+        self.parent = parent
+        self.reporting = reporting
+        self.name = name
+        self.description = description
+
+        if exc_result is None:
+            exc_result = status.FAIL
+        self.exc_result = exc_result
+
+        if reporting is None:
+            # if reporting is specified respect it, otherwise use parent's value
+            if parent:
+                reporting = parent.reporting
+            else:
+                reporting = True
+        if parent:
+            self.fullname = '/'.join((name, parent.fullname,))
+        else:
+            self.fullname = self.name
+        self.children = {}
+
+    def __enter__(self):
+        self.exception = None
+        if self.reporting:
+            report_start_event(self.fullname, self.description)
+        if self.parent:
+            self.parent.children[self.name] = (None, None)
+        return self
+
+    def childrens_finish_info(self, result=None, description=None):
+        for result in (status.FAIL, status.WARN):
+            for name, (value, msg) in self.children.items():
+                if value == result:
+                    return (result, "[" + name + "]" + msg)
+        if result is None:
+            result = status.SUCCESS
+        if description is None:
+            description = self.description
+        return (result, description)
+
+    def finish_info(self, exc):
+        # return tuple of description, and value
+        if exc:
+            # by default, exceptions are fatal
+            return (self.exc_result, self.description)
+        return self.childrens_finish_info()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.exception = exc_value
+        (result, msg) = self.finish_info(exc_value)
+        if self.parent:
+            self.parent.children[self.name] = (result, msg)
+        if self.reporting:
+            report_finish_event(self.fullname, msg, result)
+
+        
 available_handlers.register_item('log', LogHandler)
+available_handlers.register_item('print', PrintHandler)
 add_configuration(DEFAULT_CONFIG)
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index a21c08c2..c4848d5d 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -27,6 +27,7 @@ import six
 
 from cloudinit import importer
 from cloudinit import log as logging
+from cloudinit import reporting
 from cloudinit import type_utils
 from cloudinit import user_data as ud
 from cloudinit import util
@@ -246,17 +247,22 @@ def normalize_pubkey_data(pubkey_data):
     return keys
 
 
-def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
+def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
     ds_list = list_sources(cfg_list, ds_deps, pkg_list)
     ds_names = [type_utils.obj_name(f) for f in ds_list]
     LOG.debug("Searching for data source in: %s", ds_names)
 
     for cls in ds_list:
+        myreporter = reporting.ReportStack(
+            "check-%s" % cls, "searching for %s" % cls,
+            parent=reporter, exc_result=reporting.status.WARN)
+            
         try:
-            LOG.debug("Seeing if we can get any data from %s", cls)
-            s = cls(sys_cfg, distro, paths)
-            if s.get_data():
-                return (s, type_utils.obj_name(cls))
+            with myreporter:
+                LOG.debug("Seeing if we can get any data from %s", cls)
+                s = cls(sys_cfg, distro, paths)
+                if s.get_data():
+                    return (s, type_utils.obj_name(cls))
         except Exception:
             util.logexc(LOG, "Getting data from %s failed", cls)
 
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index d28e765b..dbcdbece 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -46,6 +46,7 @@ from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import type_utils
 from cloudinit import util
+from cloudinit import reporting
 
 LOG = logging.getLogger(__name__)
 
@@ -53,7 +54,7 @@ NULL_DATA_SOURCE = None
 
 
 class Init(object):
-    def __init__(self, ds_deps=None):
+    def __init__(self, reporter=None, ds_deps=None):
         if ds_deps is not None:
             self.ds_deps = ds_deps
         else:
@@ -65,6 +66,11 @@ class Init(object):
         # Changed only when a fetch occurs
         self.datasource = NULL_DATA_SOURCE
 
+        if reporter is None:
+            reporter = reporting.ReportStack(
+                name="init-reporter", description="init-desc", reporting=False)
+        self.reporter = reporter
+
     def _reset(self, reset_ds=False):
         # Recreated on access
         self._cfg = None
@@ -246,7 +252,7 @@ class Init(object):
                                                self.paths,
                                                copy.deepcopy(self.ds_deps),
                                                cfg_list,
-                                               pkg_list)
+                                               pkg_list, self.reporter)
             LOG.info("Loaded datasource %s - %s", dsname, ds)
         self.datasource = ds
         # Ensure we adjust our path members datasource
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index f4011a79..5700118f 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -32,10 +32,10 @@ class TestReportStartEvent(TestCase):
 
 class TestReportFinishEvent(TestCase):
 
-    def _report_finish_event(self, successful=None):
+    def _report_finish_event(self, result=None):
         event_name, event_description = 'my_test_event', 'my description'
         reporting.report_finish_event(
-            event_name, event_description, successful=successful)
+            event_name, event_description, result=result)
         return event_name, event_description
 
     def assertHandlersPassedObjectWithAsString(
@@ -51,7 +51,7 @@ class TestReportFinishEvent(TestCase):
             self, instantiated_handler_registry):
         event_name, event_description = self._report_finish_event()
         expected_string_representation = ': '.join(
-            ['finish', event_name, event_description])
+            ['finish', event_name, reporting.status.SUCCESS, event_description])
         self.assertHandlersPassedObjectWithAsString(
             instantiated_handler_registry.registered_items,
             expected_string_representation)
@@ -61,9 +61,9 @@ class TestReportFinishEvent(TestCase):
     def test_reporting_successful_finish_has_sensible_string_repr(
             self, instantiated_handler_registry):
         event_name, event_description = self._report_finish_event(
-            successful=True)
+            result=reporting.status.SUCCESS)
         expected_string_representation = ': '.join(
-            ['finish', event_name, 'success', event_description])
+            ['finish', event_name, reporting.status.SUCCESS, event_description])
         self.assertHandlersPassedObjectWithAsString(
             instantiated_handler_registry.registered_items,
             expected_string_representation)
@@ -73,9 +73,9 @@ class TestReportFinishEvent(TestCase):
     def test_reporting_unsuccessful_finish_has_sensible_string_repr(
             self, instantiated_handler_registry):
         event_name, event_description = self._report_finish_event(
-            successful=False)
+            result=reporting.status.FAIL)
         expected_string_representation = ': '.join(
-            ['finish', event_name, 'fail', event_description])
+            ['finish', event_name, reporting.status.FAIL, event_description])
         self.assertHandlersPassedObjectWithAsString(
             instantiated_handler_registry.registered_items,
             expected_string_representation)
-- 
cgit v1.2.3


From 6f174b41496f133af92fb373f3b718eabdebfa05 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 30 Jul 2015 22:22:24 -0400
Subject: fix arg ordering

---
 bin/cloud-init      | 2 +-
 cloudinit/stages.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/bin/cloud-init b/bin/cloud-init
index b0396cdc..6a47e5e8 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -172,7 +172,7 @@ def main_init(name, args):
         w_msg = welcome_format(name)
     else:
         w_msg = welcome_format("%s-local" % (name))
-    init = stages.Init(deps)
+    init = stages.Init(ds_deps=deps, reporter=args.reporter)
     # Stage 1
     init.read_cfg(extract_fns(args))
     # Stage 2
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index dbcdbece..2bf7a1c4 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -54,7 +54,7 @@ NULL_DATA_SOURCE = None
 
 
 class Init(object):
-    def __init__(self, reporter=None, ds_deps=None):
+    def __init__(self, ds_deps=None, reporter=None):
         if ds_deps is not None:
             self.ds_deps = ds_deps
         else:
-- 
cgit v1.2.3


From b22302d8e2b539f61faede7efb3a163966bf170a Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 31 Jul 2015 14:38:09 +0000
Subject: fix issues found when testing

---
 bin/cloud-init                |  4 ++--
 cloudinit/reporting.py        | 16 ++++++++++------
 cloudinit/sources/__init__.py |  5 +++--
 3 files changed, 15 insertions(+), 10 deletions(-)

(limited to 'cloudinit')

diff --git a/bin/cloud-init b/bin/cloud-init
index 6a47e5e8..c808eda5 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -636,8 +636,8 @@ def main():
                         "running single module %s" % args.name)
         report_on = args.report
 
-    reporter = reporting.ReportStack(rname, rdesc, reporting=report_on)
-    with reporter:
+    args.reporter = reporting.ReportStack(rname, rdesc, reporting=report_on)
+    with args.reporter:
         return util.log_time(
             logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
             get_uptime=True, func=functor, args=(name, args))
diff --git a/cloudinit/reporting.py b/cloudinit/reporting.py
index c925f661..1bd7df0d 100644
--- a/cloudinit/reporting.py
+++ b/cloudinit/reporting.py
@@ -137,7 +137,6 @@ class ReportStack(object):
     def __init__(self, name, description, parent=None, reporting=None,
                  exc_result=None):
         self.parent = parent
-        self.reporting = reporting
         self.name = name
         self.description = description
 
@@ -145,18 +144,23 @@ class ReportStack(object):
             exc_result = status.FAIL
         self.exc_result = exc_result
 
+        # use parents reporting value if not provided
         if reporting is None:
-            # if reporting is specified respect it, otherwise use parent's value
             if parent:
                 reporting = parent.reporting
             else:
                 reporting = True
+        self.reporting = reporting
+
         if parent:
-            self.fullname = '/'.join((name, parent.fullname,))
+            self.fullname = '/'.join((parent.fullname, name,))
         else:
             self.fullname = self.name
         self.children = {}
 
+    def __repr__(self):
+        return ("%s reporting=%s" % (self.fullname, self.reporting))
+
     def __enter__(self):
         self.exception = None
         if self.reporting:
@@ -166,10 +170,10 @@ class ReportStack(object):
         return self
 
     def childrens_finish_info(self, result=None, description=None):
-        for result in (status.FAIL, status.WARN):
+        for cand_result in (status.FAIL, status.WARN):
             for name, (value, msg) in self.children.items():
-                if value == result:
-                    return (result, "[" + name + "]" + msg)
+                if value == cand_result:
+                    return (value, "[" + name + "]" + msg)
         if result is None:
             result = status.SUCCESS
         if description is None:
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index c4848d5d..f585c3e4 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -252,9 +252,10 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
     ds_names = [type_utils.obj_name(f) for f in ds_list]
     LOG.debug("Searching for data source in: %s", ds_names)
 
-    for cls in ds_list:
+    for i, cls in enumerate(ds_list):
+        name=ds_names[i].replace("DataSource", "")
         myreporter = reporting.ReportStack(
-            "check-%s" % cls, "searching for %s" % cls,
+            "check-%s" % name, "searching for %s" % name,
             parent=reporter, exc_result=reporting.status.WARN)
             
         try:
-- 
cgit v1.2.3


From cc923ca255f4ce8c23819e263066e34133f3dd31 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 31 Jul 2015 15:23:04 +0000
Subject: add nicer formatting and messages for datasource searching

---
 cloudinit/sources/__init__.py | 33 +++++++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 6 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index f585c3e4..c174a58f 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -247,22 +247,43 @@ def normalize_pubkey_data(pubkey_data):
     return keys
 
 
+class SearchReportStack(reporting.ReportStack):
+    def __init__(self, source, ds_deps, parent):
+        self.source = source.replace("DataSource", "")
+        name = "check-%s" % self.source
+        self.found = False
+        self.mode = "network" if DEP_NETWORK in ds_deps else "local"
+        description = "searching for %s data from %s" % (
+            self.mode, self.source)
+        super(SearchReportStack, self).__init__(
+            name=name, description=description, parent=parent,
+            exc_result=reporting.status.WARN)
+
+    def finish_info(self, exc):
+        # return tuple of description, and value
+        if exc:
+            # by default, exceptions are fatal
+            return (self.exc_result, self.description)
+        if self.found:
+            description = "found %s data from %s" % (self.mode, self.source)
+        else:
+            description = "no %s data found from %s" % (self.mode, self.source)
+        return self.childrens_finish_info(description=description)
+
+
 def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
     ds_list = list_sources(cfg_list, ds_deps, pkg_list)
     ds_names = [type_utils.obj_name(f) for f in ds_list]
     LOG.debug("Searching for data source in: %s", ds_names)
 
     for i, cls in enumerate(ds_list):
-        name=ds_names[i].replace("DataSource", "")
-        myreporter = reporting.ReportStack(
-            "check-%s" % name, "searching for %s" % name,
-            parent=reporter, exc_result=reporting.status.WARN)
-            
+        srcname=ds_names[i]
         try:
-            with myreporter:
+            with SearchReportStack(srcname, ds_deps, reporter) as rep:
                 LOG.debug("Seeing if we can get any data from %s", cls)
                 s = cls(sys_cfg, distro, paths)
                 if s.get_data():
+                    rep.found = True
                     return (s, type_utils.obj_name(cls))
         except Exception:
             util.logexc(LOG, "Getting data from %s failed", cls)
-- 
cgit v1.2.3


From f36706442b4c1913ea8f7953993b9e03f3adf623 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 31 Jul 2015 16:12:37 +0000
Subject: address Daniel's comments in review

---
 bin/cloud-init                |  3 ++-
 cloudinit/reporting.py        | 34 +++++++++++++++-------------------
 cloudinit/sources/__init__.py |  7 +++----
 cloudinit/stages.py           |  3 ++-
 4 files changed, 22 insertions(+), 25 deletions(-)

(limited to 'cloudinit')

diff --git a/bin/cloud-init b/bin/cloud-init
index c808eda5..d0ac4c7f 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -636,7 +636,8 @@ def main():
                         "running single module %s" % args.name)
         report_on = args.report
 
-    args.reporter = reporting.ReportStack(rname, rdesc, reporting=report_on)
+    args.reporter = reporting.ReportStack(
+        rname, rdesc, reporting_enabled=report_on)
     with args.reporter:
         return util.log_time(
             logfunc=LOG.debug, msg="cloud-init mode '%s'" % name,
diff --git a/cloudinit/reporting.py b/cloudinit/reporting.py
index 1bd7df0d..154f4e03 100644
--- a/cloudinit/reporting.py
+++ b/cloudinit/reporting.py
@@ -11,6 +11,7 @@ report events in a structured manner.
 
 import abc
 import logging
+import sys
 
 from cloudinit.registry import DictRegistry
 
@@ -83,9 +84,9 @@ class LogHandler(ReportingHandler):
         logger.info(event.as_string())
 
 
-class PrintHandler(ReportingHandler):
+class StderrHandler(ReportingHandler):
     def publish_event(self, event):
-        print(event.as_string())
+        sys.stderr.write(event.as_string() + "\n")
 
 
 def add_configuration(config):
@@ -134,23 +135,20 @@ def report_start_event(event_name, event_description):
 
 
 class ReportStack(object):
-    def __init__(self, name, description, parent=None, reporting=None,
-                 exc_result=None):
+    def __init__(self, name, description, parent=None,
+                 reporting_enabled=None, result_on_exception=status.FAIL):
         self.parent = parent
         self.name = name
         self.description = description
-
-        if exc_result is None:
-            exc_result = status.FAIL
-        self.exc_result = exc_result
+        self.result_on_exception = result_on_exception
 
         # use parents reporting value if not provided
-        if reporting is None:
+        if reporting_enabled is None:
             if parent:
-                reporting = parent.reporting
+                reporting_enabled = parent.reporting_enabled
             else:
-                reporting = True
-        self.reporting = reporting
+                reporting_enabled = True
+        self.reporting_enabled = reporting_enabled
 
         if parent:
             self.fullname = '/'.join((parent.fullname, name,))
@@ -159,11 +157,10 @@ class ReportStack(object):
         self.children = {}
 
     def __repr__(self):
-        return ("%s reporting=%s" % (self.fullname, self.reporting))
+        return ("%s reporting=%s" % (self.fullname, self.reporting_enabled))
 
     def __enter__(self):
-        self.exception = None
-        if self.reporting:
+        if self.reporting_enabled:
             report_start_event(self.fullname, self.description)
         if self.parent:
             self.parent.children[self.name] = (None, None)
@@ -184,18 +181,17 @@ class ReportStack(object):
         # return tuple of description, and value
         if exc:
             # by default, exceptions are fatal
-            return (self.exc_result, self.description)
+            return (self.result_on_exception, self.description)
         return self.childrens_finish_info()
 
     def __exit__(self, exc_type, exc_value, traceback):
-        self.exception = exc_value
         (result, msg) = self.finish_info(exc_value)
         if self.parent:
             self.parent.children[self.name] = (result, msg)
-        if self.reporting:
+        if self.reporting_enabled:
             report_finish_event(self.fullname, msg, result)
 
         
 available_handlers.register_item('log', LogHandler)
-available_handlers.register_item('print', PrintHandler)
+available_handlers.register_item('print', StderrHandler)
 add_configuration(DEFAULT_CONFIG)
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index c174a58f..0dc75f9e 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -257,7 +257,7 @@ class SearchReportStack(reporting.ReportStack):
             self.mode, self.source)
         super(SearchReportStack, self).__init__(
             name=name, description=description, parent=parent,
-            exc_result=reporting.status.WARN)
+            result_on_exception=reporting.status.WARN)
 
     def finish_info(self, exc):
         # return tuple of description, and value
@@ -276,10 +276,9 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
     ds_names = [type_utils.obj_name(f) for f in ds_list]
     LOG.debug("Searching for data source in: %s", ds_names)
 
-    for i, cls in enumerate(ds_list):
-        srcname=ds_names[i]
+    for name, cls in zip(ds_names, ds_list):
         try:
-            with SearchReportStack(srcname, ds_deps, reporter) as rep:
+            with SearchReportStack(name, ds_deps, reporter) as rep:
                 LOG.debug("Seeing if we can get any data from %s", cls)
                 s = cls(sys_cfg, distro, paths)
                 if s.get_data():
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 2bf7a1c4..82197d02 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -68,7 +68,8 @@ class Init(object):
 
         if reporter is None:
             reporter = reporting.ReportStack(
-                name="init-reporter", description="init-desc", reporting=False)
+                name="init-reporter", description="init-desc",
+                reporting_enabled=False)
         self.reporter = reporter
 
     def _reset(self, reset_ds=False):
-- 
cgit v1.2.3


From 4f4e6d1cf90928daa1ab339f687b3319454aefdd Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 31 Jul 2015 16:31:26 +0000
Subject: move 'mode' out of SearchReportStack

---
 cloudinit/sources/__init__.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 0dc75f9e..6f2d2276 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -248,13 +248,12 @@ def normalize_pubkey_data(pubkey_data):
 
 
 class SearchReportStack(reporting.ReportStack):
-    def __init__(self, source, ds_deps, parent):
+    def __init__(self, source, mode, parent):
         self.source = source.replace("DataSource", "")
         name = "check-%s" % self.source
         self.found = False
-        self.mode = "network" if DEP_NETWORK in ds_deps else "local"
-        description = "searching for %s data from %s" % (
-            self.mode, self.source)
+        self.mode = mode
+        description = "searching for %s data from %s" % (mode, self.source)
         super(SearchReportStack, self).__init__(
             name=name, description=description, parent=parent,
             result_on_exception=reporting.status.WARN)
@@ -274,11 +273,12 @@ class SearchReportStack(reporting.ReportStack):
 def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
     ds_list = list_sources(cfg_list, ds_deps, pkg_list)
     ds_names = [type_utils.obj_name(f) for f in ds_list]
-    LOG.debug("Searching for data source in: %s", ds_names)
+    mode = "network" if DEP_NETWORK in ds_deps else "local"
+    LOG.debug("Searching for %s data source in: %s", mode, ds_names)
 
     for name, cls in zip(ds_names, ds_list):
         try:
-            with SearchReportStack(name, ds_deps, reporter) as rep:
+            with SearchReportStack(name, mode, reporter) as rep:
                 LOG.debug("Seeing if we can get any data from %s", cls)
                 s = cls(sys_cfg, distro, paths)
                 if s.get_data():
-- 
cgit v1.2.3


From a0c8ba1c53e2834c0d9be8df9b514df0a631e09d Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 31 Jul 2015 17:08:50 +0000
Subject: adjust searching so cache hits are logged

---
 cloudinit/stages.py | 13 ++++++++++---
 1 file changed, 10 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 82197d02..79d22538 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -241,9 +241,16 @@ class Init(object):
     def _get_data_source(self):
         if self.datasource is not NULL_DATA_SOURCE:
             return self.datasource
-        ds = self._restore_from_cache()
-        if ds:
-            LOG.debug("Restored from cache, datasource: %s", ds)
+
+        with reporting.ReportStack(
+            name="check-cache", description="attempting to read from cache",
+            parent=self.reporter) as myrep:
+                ds = self._restore_from_cache()
+                if ds:
+                    LOG.debug("Restored from cache, datasource: %s", ds)
+                    myrep.description = "restored from cache"
+                else:
+                    myrep.description = "no cache found"
         if not ds:
             (cfg_list, pkg_list) = self._get_datasources()
             # Deep copy so that user-data handlers can not modify
-- 
cgit v1.2.3


From 07b452e166b5d2ff34d5558b1dbba42ab0f1f23c Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 31 Jul 2015 19:27:52 +0000
Subject: plumb the rest of the reporting through

---
 bin/cloud-init                |  9 +++++----
 cloudinit/cloud.py            |  8 +++++++-
 cloudinit/reporting.py        | 32 +++++++++++++++++++++-----------
 cloudinit/sources/__init__.py | 32 +++++++-------------------------
 cloudinit/stages.py           | 29 ++++++++++++++++++++++++-----
 5 files changed, 64 insertions(+), 46 deletions(-)

(limited to 'cloudinit')

diff --git a/bin/cloud-init b/bin/cloud-init
index de3b9fbf..d369a806 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -284,7 +284,7 @@ def main_init(name, args):
         return (init.datasource, ["Consuming user data failed!"])
 
     # Stage 8 - re-read and apply relevant cloud-config to include user-data
-    mods = stages.Modules(init, extract_fns(args))
+    mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
     # Stage 9
     try:
         outfmt_orig = outfmt
@@ -329,7 +329,7 @@ def main_modules(action_name, args):
         if not args.force:
             return [(msg)]
     # Stage 3
-    mods = stages.Modules(init, extract_fns(args))
+    mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
     # Stage 4
     try:
         LOG.debug("Closing stdin")
@@ -384,7 +384,7 @@ def main_single(name, args):
         if not args.force:
             return 1
     # Stage 3
-    mods = stages.Modules(init, extract_fns(args))
+    mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
     mod_args = args.module_args
     if mod_args:
         LOG.debug("Using passed in arguments %s", mod_args)
@@ -630,7 +630,8 @@ def main():
         else:
             rname, rdesc = ("init-network", "searching for network datasources")
     elif name == "modules":
-        rname, rdesc = ("modules-%s" % args.mode, "running modules for %s")
+        rname, rdesc = ("modules-%s" % args.mode,
+                        "running modules for %s" % args.mode)
     elif name == "single":
         rname, rdesc = ("single/%s" % args.name,
                         "running single module %s" % args.name)
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 95e0cfb2..71eb80eb 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -40,12 +40,18 @@ LOG = logging.getLogger(__name__)
 
 
 class Cloud(object):
-    def __init__(self, datasource, paths, cfg, distro, runners):
+    def __init__(self, datasource, paths, cfg, distro, runners, reporter=None):
         self.datasource = datasource
         self.paths = paths
         self.distro = distro
         self._cfg = cfg
         self._runners = runners
+        if reporter is None:
+            reporter = reporting.ReportStack(
+                name="unnamed-cloud-reporter",
+                description="unnamed-cloud-reporter",
+                reporting_enabled=False)
+        self.reporter = reporter
 
     # If a 'user' manipulates logging or logging services
     # it is typically useful to cause the logging to be
diff --git a/cloudinit/reporting.py b/cloudinit/reporting.py
index 154f4e03..08014c70 100644
--- a/cloudinit/reporting.py
+++ b/cloudinit/reporting.py
@@ -86,7 +86,8 @@ class LogHandler(ReportingHandler):
 
 class StderrHandler(ReportingHandler):
     def publish_event(self, event):
-        sys.stderr.write(event.as_string() + "\n")
+        #sys.stderr.write(event.as_string() + "\n")
+        print(event.as_string())
 
 
 def add_configuration(config):
@@ -135,12 +136,14 @@ def report_start_event(event_name, event_description):
 
 
 class ReportStack(object):
-    def __init__(self, name, description, parent=None,
+    def __init__(self, name, description, message=None, parent=None,
                  reporting_enabled=None, result_on_exception=status.FAIL):
         self.parent = parent
         self.name = name
         self.description = description
+        self.message = message
         self.result_on_exception = result_on_exception
+        self.result = None
 
         # use parents reporting value if not provided
         if reporting_enabled is None:
@@ -160,28 +163,35 @@ class ReportStack(object):
         return ("%s reporting=%s" % (self.fullname, self.reporting_enabled))
 
     def __enter__(self):
+        self.result = None
         if self.reporting_enabled:
             report_start_event(self.fullname, self.description)
         if self.parent:
             self.parent.children[self.name] = (None, None)
         return self
 
-    def childrens_finish_info(self, result=None, description=None):
+    def childrens_finish_info(self):
         for cand_result in (status.FAIL, status.WARN):
             for name, (value, msg) in self.children.items():
                 if value == cand_result:
                     return (value, "[" + name + "]" + msg)
-        if result is None:
-            result = status.SUCCESS
-        if description is None:
-            description = self.description
-        return (result, description)
-
+        return (self.result, self.message)
+
+    @property
+    def message(self):
+        if self._message is not None:
+            return self._message
+        return self.description
+
+    @message.setter
+    def message(self, value):
+        self._message = value
+       
+        
     def finish_info(self, exc):
         # return tuple of description, and value
         if exc:
-            # by default, exceptions are fatal
-            return (self.result_on_exception, self.description)
+            return (self.result_on_exception, self.message)
         return self.childrens_finish_info()
 
     def __exit__(self, exc_type, exc_value, traceback):
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 6f2d2276..3b48f173 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -247,29 +247,6 @@ def normalize_pubkey_data(pubkey_data):
     return keys
 
 
-class SearchReportStack(reporting.ReportStack):
-    def __init__(self, source, mode, parent):
-        self.source = source.replace("DataSource", "")
-        name = "check-%s" % self.source
-        self.found = False
-        self.mode = mode
-        description = "searching for %s data from %s" % (mode, self.source)
-        super(SearchReportStack, self).__init__(
-            name=name, description=description, parent=parent,
-            result_on_exception=reporting.status.WARN)
-
-    def finish_info(self, exc):
-        # return tuple of description, and value
-        if exc:
-            # by default, exceptions are fatal
-            return (self.exc_result, self.description)
-        if self.found:
-            description = "found %s data from %s" % (self.mode, self.source)
-        else:
-            description = "no %s data found from %s" % (self.mode, self.source)
-        return self.childrens_finish_info(description=description)
-
-
 def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
     ds_list = list_sources(cfg_list, ds_deps, pkg_list)
     ds_names = [type_utils.obj_name(f) for f in ds_list]
@@ -277,12 +254,17 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
     LOG.debug("Searching for %s data source in: %s", mode, ds_names)
 
     for name, cls in zip(ds_names, ds_list):
+        myrep = reporting.ReportStack(
+            name="search-%s-%s" % (mode, name.replace("DataSource", "")),
+            description="searching for %s data from %s" % (mode, name),
+            message = "no %s data found from %s" % (mode, name),
+            parent=reporter)
         try:
-            with SearchReportStack(name, mode, reporter) as rep:
+            with myrep:
                 LOG.debug("Seeing if we can get any data from %s", cls)
                 s = cls(sys_cfg, distro, paths)
                 if s.get_data():
-                    rep.found = True
+                    myrep.message = "found %s data from %s" % (mode, name)
                     return (s, type_utils.obj_name(cls))
         except Exception:
             util.logexc(LOG, "Getting data from %s failed", cls)
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 79d22538..8c79ae4e 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -341,7 +341,8 @@ class Init(object):
         # Form the needed options to cloudify our members
         return cloud.Cloud(self.datasource,
                            self.paths, self.cfg,
-                           self.distro, helpers.Runners(self.paths))
+                           self.distro, helpers.Runners(self.paths),
+                           reporter=self.reporter)
 
     def update(self):
         if not self._write_to_cache():
@@ -507,8 +508,14 @@ class Init(object):
     def consume_data(self, frequency=PER_INSTANCE):
         # Consume the userdata first, because we need want to let the part
         # handlers run first (for merging stuff)
-        self._consume_userdata(frequency)
-        self._consume_vendordata(frequency)
+        with reporting.ReportStack(
+            "consume-user-data", "reading and applying user-data",
+            parent=self.reporter):
+                self._consume_userdata(frequency)
+        with reporting.ReportStack(
+            "consume-vendor-data", "reading and applying vendor-data",
+            parent=self.reporter):
+                self._consume_userdata(frequency)
 
         # Perform post-consumption adjustments so that
         # modules that run during the init stage reflect
@@ -581,11 +588,12 @@ class Init(object):
 
 
 class Modules(object):
-    def __init__(self, init, cfg_files=None):
+    def __init__(self, init, cfg_files=None, reporter=None):
         self.init = init
         self.cfg_files = cfg_files
         # Created on first use
         self._cached_cfg = None
+        self.reporter = reporter
 
     @property
     def cfg(self):
@@ -695,7 +703,18 @@ class Modules(object):
                 which_ran.append(name)
                 # This name will affect the semaphore name created
                 run_name = "config-%s" % (name)
-                cc.run(run_name, mod.handle, func_args, freq=freq)
+
+                desc="running %s with frequency %s" % (run_name, freq)
+                myrep = reporting.ReportStack(
+                    name=run_name, description=desc, parent=self.reporter)
+
+                with myrep:
+                    ran, _r = cc.run(run_name, mod.handle, func_args, freq=freq)
+                    if ran:
+                        myrep.message = "%s ran successfully" % run_name
+                    else:
+                        myrep.message = "%s previously ran" % run_name
+                    
             except Exception as e:
                 util.logexc(LOG, "Running module %s (%s) failed", name, mod)
                 failures.append((name, e))
-- 
cgit v1.2.3


From 89c5936c7c1fb6d172cd0eee9c5f9aa2cd5e2053 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Sun, 2 Aug 2015 16:50:47 -0400
Subject: sync with 2.0 trunk on reporting

---
 cloudinit/reporting.py          | 207 ----------------------------------------
 cloudinit/reporting/__init__.py | 100 +++++++++++++++++++
 cloudinit/reporting/handlers.py |  25 +++++
 3 files changed, 125 insertions(+), 207 deletions(-)
 delete mode 100644 cloudinit/reporting.py
 create mode 100644 cloudinit/reporting/__init__.py
 create mode 100644 cloudinit/reporting/handlers.py

(limited to 'cloudinit')

diff --git a/cloudinit/reporting.py b/cloudinit/reporting.py
deleted file mode 100644
index 08014c70..00000000
--- a/cloudinit/reporting.py
+++ /dev/null
@@ -1,207 +0,0 @@
-# Copyright 2015 Canonical Ltd.
-# This file is part of cloud-init.  See LICENCE file for license information.
-#
-# vi: ts=4 expandtab
-"""
-cloud-init reporting framework
-
-The reporting framework is intended to allow all parts of cloud-init to
-report events in a structured manner.
-"""
-
-import abc
-import logging
-import sys
-
-from cloudinit.registry import DictRegistry
-
-
-FINISH_EVENT_TYPE = 'finish'
-START_EVENT_TYPE = 'start'
-
-DEFAULT_CONFIG = {
-    'logging': {'type': 'log'},
-    'print': {'type': 'print'},
-}
-
-
-class _nameset(set):
-    def __getattr__(self, name):
-        if name in self:
-            return name
-        raise AttributeError
-
-status = _nameset(("SUCCESS", "WARN", "FAIL"))
-
-instantiated_handler_registry = DictRegistry()
-available_handlers = DictRegistry()
-
-
-class ReportingEvent(object):
-    """Encapsulation of event formatting."""
-
-    def __init__(self, event_type, name, description):
-        self.event_type = event_type
-        self.name = name
-        self.description = description
-
-    def as_string(self):
-        """The event represented as a string."""
-        return '{0}: {1}: {2}'.format(
-            self.event_type, self.name, self.description)
-
-
-class FinishReportingEvent(ReportingEvent):
-
-    def __init__(self, name, description, result=None):
-        super(FinishReportingEvent, self).__init__(
-            FINISH_EVENT_TYPE, name, description)
-        if result is None:
-            result = status.SUCCESS
-        self.result = result
-        if result not in status:
-            raise ValueError("Invalid result: %s" % result)
-
-    def as_string(self):
-        return '{0}: {1}: {2}: {3}'.format(
-            self.event_type, self.name, self.result, self.description)
-
-
-class ReportingHandler(object):
-
-    @abc.abstractmethod
-    def publish_event(self, event):
-        raise NotImplementedError
-
-
-class LogHandler(ReportingHandler):
-    """Publishes events to the cloud-init log at the ``INFO`` log level."""
-
-    def publish_event(self, event):
-        """Publish an event to the ``INFO`` log level."""
-        logger = logging.getLogger(
-            '.'.join([__name__, event.event_type, event.name]))
-        logger.info(event.as_string())
-
-
-class StderrHandler(ReportingHandler):
-    def publish_event(self, event):
-        #sys.stderr.write(event.as_string() + "\n")
-        print(event.as_string())
-
-
-def add_configuration(config):
-    for handler_name, handler_config in config.items():
-        handler_config = handler_config.copy()
-        cls = available_handlers.registered_items[handler_config.pop('type')]
-        instance = cls(**handler_config)
-        instantiated_handler_registry.register_item(handler_name, instance)
-
-
-def report_event(event):
-    """Report an event to all registered event handlers.
-
-    This should generally be called via one of the other functions in
-    the reporting module.
-
-    :param event_type:
-        The type of the event; this should be a constant from the
-        reporting module.
-    """
-    for _, handler in instantiated_handler_registry.registered_items.items():
-        handler.publish_event(event)
-
-
-def report_finish_event(event_name, event_description, result):
-    """Report a "finish" event.
-
-    See :py:func:`.report_event` for parameter details.
-    """
-    event = FinishReportingEvent(event_name, event_description, result)
-    return report_event(event)
-
-
-def report_start_event(event_name, event_description):
-    """Report a "start" event.
-
-    :param event_name:
-        The name of the event; this should be a topic which events would
-        share (e.g. it will be the same for start and finish events).
-
-    :param event_description:
-        A human-readable description of the event that has occurred.
-    """
-    event = ReportingEvent(START_EVENT_TYPE, event_name, event_description)
-    return report_event(event)
-
-
-class ReportStack(object):
-    def __init__(self, name, description, message=None, parent=None,
-                 reporting_enabled=None, result_on_exception=status.FAIL):
-        self.parent = parent
-        self.name = name
-        self.description = description
-        self.message = message
-        self.result_on_exception = result_on_exception
-        self.result = None
-
-        # use parents reporting value if not provided
-        if reporting_enabled is None:
-            if parent:
-                reporting_enabled = parent.reporting_enabled
-            else:
-                reporting_enabled = True
-        self.reporting_enabled = reporting_enabled
-
-        if parent:
-            self.fullname = '/'.join((parent.fullname, name,))
-        else:
-            self.fullname = self.name
-        self.children = {}
-
-    def __repr__(self):
-        return ("%s reporting=%s" % (self.fullname, self.reporting_enabled))
-
-    def __enter__(self):
-        self.result = None
-        if self.reporting_enabled:
-            report_start_event(self.fullname, self.description)
-        if self.parent:
-            self.parent.children[self.name] = (None, None)
-        return self
-
-    def childrens_finish_info(self):
-        for cand_result in (status.FAIL, status.WARN):
-            for name, (value, msg) in self.children.items():
-                if value == cand_result:
-                    return (value, "[" + name + "]" + msg)
-        return (self.result, self.message)
-
-    @property
-    def message(self):
-        if self._message is not None:
-            return self._message
-        return self.description
-
-    @message.setter
-    def message(self, value):
-        self._message = value
-       
-        
-    def finish_info(self, exc):
-        # return tuple of description, and value
-        if exc:
-            return (self.result_on_exception, self.message)
-        return self.childrens_finish_info()
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        (result, msg) = self.finish_info(exc_value)
-        if self.parent:
-            self.parent.children[self.name] = (result, msg)
-        if self.reporting_enabled:
-            report_finish_event(self.fullname, msg, result)
-
-        
-available_handlers.register_item('log', LogHandler)
-available_handlers.register_item('print', StderrHandler)
-add_configuration(DEFAULT_CONFIG)
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
new file mode 100644
index 00000000..b0364eec
--- /dev/null
+++ b/cloudinit/reporting/__init__.py
@@ -0,0 +1,100 @@
+# Copyright 2015 Canonical Ltd.
+# This file is part of cloud-init.  See LICENCE file for license information.
+#
+# vi: ts=4 expandtab
+"""
+cloud-init reporting framework
+
+The reporting framework is intended to allow all parts of cloud-init to
+report events in a structured manner.
+"""
+
+from cloudinit.registry import DictRegistry
+from cloudinit.reporting.handlers import available_handlers
+
+
+FINISH_EVENT_TYPE = 'finish'
+START_EVENT_TYPE = 'start'
+
+DEFAULT_CONFIG = {
+    'logging': {'type': 'log'},
+}
+
+instantiated_handler_registry = DictRegistry()
+
+
+class ReportingEvent(object):
+    """Encapsulation of event formatting."""
+
+    def __init__(self, event_type, name, description):
+        self.event_type = event_type
+        self.name = name
+        self.description = description
+
+    def as_string(self):
+        """The event represented as a string."""
+        return '{0}: {1}: {2}'.format(
+            self.event_type, self.name, self.description)
+
+
+class FinishReportingEvent(ReportingEvent):
+
+    def __init__(self, name, description, successful=None):
+        super(FinishReportingEvent, self).__init__(
+            FINISH_EVENT_TYPE, name, description)
+        self.successful = successful
+
+    def as_string(self):
+        if self.successful is None:
+            return super(FinishReportingEvent, self).as_string()
+        success_string = 'success' if self.successful else 'fail'
+        return '{0}: {1}: {2}: {3}'.format(
+            self.event_type, self.name, success_string, self.description)
+
+
+def add_configuration(config):
+    for handler_name, handler_config in config.items():
+        handler_config = handler_config.copy()
+        cls = available_handlers.registered_items[handler_config.pop('type')]
+        instance = cls(**handler_config)
+        instantiated_handler_registry.register_item(handler_name, instance)
+
+
+def report_event(event):
+    """Report an event to all registered event handlers.
+
+    This should generally be called via one of the other functions in
+    the reporting module.
+
+    :param event_type:
+        The type of the event; this should be a constant from the
+        reporting module.
+    """
+    for _, handler in instantiated_handler_registry.registered_items.items():
+        handler.publish_event(event)
+
+
+def report_finish_event(event_name, event_description, successful=None):
+    """Report a "finish" event.
+
+    See :py:func:`.report_event` for parameter details.
+    """
+    event = FinishReportingEvent(event_name, event_description, successful)
+    return report_event(event)
+
+
+def report_start_event(event_name, event_description):
+    """Report a "start" event.
+
+    :param event_name:
+        The name of the event; this should be a topic which events would
+        share (e.g. it will be the same for start and finish events).
+
+    :param event_description:
+        A human-readable description of the event that has occurred.
+    """
+    event = ReportingEvent(START_EVENT_TYPE, event_name, event_description)
+    return report_event(event)
+
+
+add_configuration(DEFAULT_CONFIG)
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
new file mode 100644
index 00000000..be323f53
--- /dev/null
+++ b/cloudinit/reporting/handlers.py
@@ -0,0 +1,25 @@
+import abc
+import logging
+
+from cloudinit.registry import DictRegistry
+
+
+class ReportingHandler(object):
+
+    @abc.abstractmethod
+    def publish_event(self, event):
+        raise NotImplementedError
+
+
+class LogHandler(ReportingHandler):
+    """Publishes events to the cloud-init log at the ``INFO`` log level."""
+
+    def publish_event(self, event):
+        """Publish an event to the ``INFO`` log level."""
+        logger = logging.getLogger(
+            '.'.join(['cloudinit', 'reporting', event.event_type, event.name]))
+        logger.info(event.as_string())
+
+
+available_handlers = DictRegistry()
+available_handlers.register_item('log', LogHandler)
-- 
cgit v1.2.3


From 89c564a6fd5ac89869f83541370557e3fa58495c Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Sun, 2 Aug 2015 17:51:40 -0400
Subject: fix tests from sync; change ReportStack to ReportEventStack; change
 default ReportEventStack result to status.SUCCESS instead of None

---
 bin/cloud-init                    |  3 +-
 cloudinit/cloud.py                |  2 +-
 cloudinit/reporting/__init__.py   | 91 +++++++++++++++++++++++++++++++++++----
 cloudinit/reporting/handlers.py   |  7 +++
 cloudinit/sources/__init__.py     |  2 +-
 cloudinit/stages.py               | 10 ++---
 tests/unittests/test_reporting.py | 19 ++++----
 7 files changed, 109 insertions(+), 25 deletions(-)

(limited to 'cloudinit')

diff --git a/bin/cloud-init b/bin/cloud-init
index d369a806..51253c42 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -637,7 +637,8 @@ def main():
                         "running single module %s" % args.name)
         report_on = args.report
 
-    args.reporter = reporting.ReportStack(
+    reporting.add_configuration({'print': {'type': 'print'}})
+    args.reporter = reporting.ReportEventStack(
         rname, rdesc, reporting_enabled=report_on)
     with args.reporter:
         return util.log_time(
diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index 71eb80eb..a0fb42a3 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -47,7 +47,7 @@ class Cloud(object):
         self._cfg = cfg
         self._runners = runners
         if reporter is None:
-            reporter = reporting.ReportStack(
+            reporter = reporting.ReportEventStack(
                 name="unnamed-cloud-reporter",
                 description="unnamed-cloud-reporter",
                 reporting_enabled=False)
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index b0364eec..78dde715 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -22,6 +22,15 @@ DEFAULT_CONFIG = {
 
 instantiated_handler_registry = DictRegistry()
 
+class _nameset(set):
+    def __getattr__(self, name):
+        if name in self:
+            return name
+        raise AttributeError("%s not a valid value" % name)
+
+
+status = _nameset(("SUCCESS", "WARN", "FAIL"))
+
 
 class ReportingEvent(object):
     """Encapsulation of event formatting."""
@@ -39,17 +48,16 @@ class ReportingEvent(object):
 
 class FinishReportingEvent(ReportingEvent):
 
-    def __init__(self, name, description, successful=None):
+    def __init__(self, name, description, result=status.SUCCESS):
         super(FinishReportingEvent, self).__init__(
             FINISH_EVENT_TYPE, name, description)
-        self.successful = successful
+        self.result = result
+        if result not in status:
+            raise ValueError("Invalid result: %s" % result)
 
     def as_string(self):
-        if self.successful is None:
-            return super(FinishReportingEvent, self).as_string()
-        success_string = 'success' if self.successful else 'fail'
         return '{0}: {1}: {2}: {3}'.format(
-            self.event_type, self.name, success_string, self.description)
+            self.event_type, self.name, self.result, self.description)
 
 
 def add_configuration(config):
@@ -74,12 +82,13 @@ def report_event(event):
         handler.publish_event(event)
 
 
-def report_finish_event(event_name, event_description, successful=None):
+def report_finish_event(event_name, event_description,
+                        result=status.SUCCESS):
     """Report a "finish" event.
 
     See :py:func:`.report_event` for parameter details.
     """
-    event = FinishReportingEvent(event_name, event_description, successful)
+    event = FinishReportingEvent(event_name, event_description, result)
     return report_event(event)
 
 
@@ -97,4 +106,70 @@ def report_start_event(event_name, event_description):
     return report_event(event)
 
 
+class ReportEventStack(object):
+    def __init__(self, name, description, message=None, parent=None,
+                 reporting_enabled=None, result_on_exception=status.FAIL):
+        self.parent = parent
+        self.name = name
+        self.description = description
+        self.message = message
+        self.result_on_exception = result_on_exception
+        self.result = status.SUCCESS
+
+        # use parents reporting value if not provided
+        if reporting_enabled is None:
+            if parent:
+                reporting_enabled = parent.reporting_enabled
+            else:
+                reporting_enabled = True
+        self.reporting_enabled = reporting_enabled
+
+        if parent:
+            self.fullname = '/'.join((parent.fullname, name,))
+        else:
+            self.fullname = self.name
+        self.children = {}
+
+    def __repr__(self):
+        return ("%s reporting=%s" % (self.fullname, self.reporting_enabled))
+
+    def __enter__(self):
+        self.result = status.SUCCESS
+        if self.reporting_enabled:
+            report_start_event(self.fullname, self.description)
+        if self.parent:
+            self.parent.children[self.name] = (None, None)
+        return self
+
+    def childrens_finish_info(self):
+        for cand_result in (status.FAIL, status.WARN):
+            for name, (value, msg) in self.children.items():
+                if value == cand_result:
+                    return (value, "[" + name + "]" + msg)
+        return (self.result, self.message)
+
+    @property
+    def message(self):
+        if self._message is not None:
+            return self._message
+        return self.description
+
+    @message.setter
+    def message(self, value):
+        self._message = value
+       
+    def finish_info(self, exc):
+        # return tuple of description, and value
+        if exc:
+            return (self.result_on_exception, self.message)
+        return self.childrens_finish_info()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        (result, msg) = self.finish_info(exc_value)
+        if self.parent:
+            self.parent.children[self.name] = (result, msg)
+        if self.reporting_enabled:
+            report_finish_event(self.fullname, msg, result)
+
+
 add_configuration(DEFAULT_CONFIG)
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index be323f53..1d5ca524 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -21,5 +21,12 @@ class LogHandler(ReportingHandler):
         logger.info(event.as_string())
 
 
+class StderrHandler(ReportingHandler):
+    def publish_event(self, event):
+        #sys.stderr.write(event.as_string() + "\n")
+        print(event.as_string())
+
+
 available_handlers = DictRegistry()
 available_handlers.register_item('log', LogHandler)
+available_handlers.register_item('print', StderrHandler)
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 3b48f173..d07cf1fa 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -254,7 +254,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
     LOG.debug("Searching for %s data source in: %s", mode, ds_names)
 
     for name, cls in zip(ds_names, ds_list):
-        myrep = reporting.ReportStack(
+        myrep = reporting.ReportEventStack(
             name="search-%s-%s" % (mode, name.replace("DataSource", "")),
             description="searching for %s data from %s" % (mode, name),
             message = "no %s data found from %s" % (mode, name),
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 8c79ae4e..42989bb4 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -67,7 +67,7 @@ class Init(object):
         self.datasource = NULL_DATA_SOURCE
 
         if reporter is None:
-            reporter = reporting.ReportStack(
+            reporter = reporting.ReportEventStack(
                 name="init-reporter", description="init-desc",
                 reporting_enabled=False)
         self.reporter = reporter
@@ -242,7 +242,7 @@ class Init(object):
         if self.datasource is not NULL_DATA_SOURCE:
             return self.datasource
 
-        with reporting.ReportStack(
+        with reporting.ReportEventStack(
             name="check-cache", description="attempting to read from cache",
             parent=self.reporter) as myrep:
                 ds = self._restore_from_cache()
@@ -508,11 +508,11 @@ class Init(object):
     def consume_data(self, frequency=PER_INSTANCE):
         # Consume the userdata first, because we need want to let the part
         # handlers run first (for merging stuff)
-        with reporting.ReportStack(
+        with reporting.ReportEventStack(
             "consume-user-data", "reading and applying user-data",
             parent=self.reporter):
                 self._consume_userdata(frequency)
-        with reporting.ReportStack(
+        with reporting.ReportEventStack(
             "consume-vendor-data", "reading and applying vendor-data",
             parent=self.reporter):
                 self._consume_userdata(frequency)
@@ -705,7 +705,7 @@ class Modules(object):
                 run_name = "config-%s" % (name)
 
                 desc="running %s with frequency %s" % (run_name, freq)
-                myrep = reporting.ReportStack(
+                myrep = reporting.ReportEventStack(
                     name=run_name, description=desc, parent=self.reporter)
 
                 with myrep:
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index 5700118f..4f4cf3a4 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -32,7 +32,7 @@ class TestReportStartEvent(TestCase):
 
 class TestReportFinishEvent(TestCase):
 
-    def _report_finish_event(self, result=None):
+    def _report_finish_event(self, result=reporting.status.SUCCESS):
         event_name, event_description = 'my_test_event', 'my description'
         reporting.report_finish_event(
             event_name, event_description, result=result)
@@ -95,31 +95,32 @@ class TestReportingHandler(TestCase):
 
     def test_no_default_publish_event_implementation(self):
         self.assertRaises(NotImplementedError,
-                          reporting.ReportingHandler().publish_event, None)
+                          reporting.handlers.ReportingHandler().publish_event,
+                          None)
 
 
 class TestLogHandler(TestCase):
 
-    @mock.patch.object(reporting.logging, 'getLogger')
+    @mock.patch.object(reporting.handlers.logging, 'getLogger')
     def test_appropriate_logger_used(self, getLogger):
         event_type, event_name = 'test_type', 'test_name'
         event = reporting.ReportingEvent(event_type, event_name, 'description')
-        reporting.LogHandler().publish_event(event)
+        reporting.handlers.LogHandler().publish_event(event)
         self.assertEqual(
             [mock.call(
                 'cloudinit.reporting.{0}.{1}'.format(event_type, event_name))],
             getLogger.call_args_list)
 
-    @mock.patch.object(reporting.logging, 'getLogger')
+    @mock.patch.object(reporting.handlers.logging, 'getLogger')
     def test_single_log_message_at_info_published(self, getLogger):
         event = reporting.ReportingEvent('type', 'name', 'description')
-        reporting.LogHandler().publish_event(event)
+        reporting.handlers.LogHandler().publish_event(event)
         self.assertEqual(1, getLogger.return_value.info.call_count)
 
-    @mock.patch.object(reporting.logging, 'getLogger')
+    @mock.patch.object(reporting.handlers.logging, 'getLogger')
     def test_log_message_uses_event_as_string(self, getLogger):
         event = reporting.ReportingEvent('type', 'name', 'description')
-        reporting.LogHandler().publish_event(event)
+        reporting.handlers.LogHandler().publish_event(event)
         self.assertIn(event.as_string(),
                       getLogger.return_value.info.call_args[0][0])
 
@@ -130,7 +131,7 @@ class TestDefaultRegisteredHandler(TestCase):
         registered_items = (
             reporting.instantiated_handler_registry.registered_items)
         for _, item in registered_items.items():
-            if isinstance(item, reporting.LogHandler):
+            if isinstance(item, reporting.handlers.LogHandler):
                 break
         else:
             self.fail('No reporting LogHandler registered by default.')
-- 
cgit v1.2.3


From e29c07adc1aa9d042ae790d1cb900a6a51a85952 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Sun, 2 Aug 2015 18:06:50 -0400
Subject: event name doesn't need mode as it is run through init-local or
 init-net

---
 cloudinit/sources/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index d07cf1fa..cf50c1fb 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -255,7 +255,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
 
     for name, cls in zip(ds_names, ds_list):
         myrep = reporting.ReportEventStack(
-            name="search-%s-%s" % (mode, name.replace("DataSource", "")),
+            name="search-%s" % name.replace("DataSource", ""),
             description="searching for %s data from %s" % (mode, name),
             message = "no %s data found from %s" % (mode, name),
             parent=reporter)
-- 
cgit v1.2.3


From 0fdba48edab8bc4894d90e43fcca977f21bbd202 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 4 Aug 2015 21:17:43 -0500
Subject: fix lack of import in cloud.py

---
 cloudinit/cloud.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'cloudinit')

diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index a0fb42a3..edee3887 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -24,6 +24,7 @@ import copy
 import os
 
 from cloudinit import log as logging
+from cloudinit import reporting
 
 LOG = logging.getLogger(__name__)
 
-- 
cgit v1.2.3


From 9f49cf601fd7bedb429d2bfcd7e877c9ed5f3690 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 4 Aug 2015 21:50:08 -0500
Subject: fix all tests (were broken due to copied code calling userdata twice)

---
 cloudinit/stages.py | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 42989bb4..7b489b9f 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -243,14 +243,14 @@ class Init(object):
             return self.datasource
 
         with reporting.ReportEventStack(
-            name="check-cache", description="attempting to read from cache",
-            parent=self.reporter) as myrep:
-                ds = self._restore_from_cache()
-                if ds:
-                    LOG.debug("Restored from cache, datasource: %s", ds)
-                    myrep.description = "restored from cache"
-                else:
-                    myrep.description = "no cache found"
+                name="check-cache", description="attempting to read from cache",
+                parent=self.reporter) as myrep:
+            ds = self._restore_from_cache()
+            if ds:
+                LOG.debug("Restored from cache, datasource: %s", ds)
+                myrep.description = "restored from cache"
+            else:
+                myrep.description = "no cache found"
         if not ds:
             (cfg_list, pkg_list) = self._get_datasources()
             # Deep copy so that user-data handlers can not modify
@@ -515,7 +515,7 @@ class Init(object):
         with reporting.ReportEventStack(
             "consume-vendor-data", "reading and applying vendor-data",
             parent=self.reporter):
-                self._consume_userdata(frequency)
+                self._consume_vendordata(frequency)
 
         # Perform post-consumption adjustments so that
         # modules that run during the init stage reflect
@@ -593,6 +593,10 @@ class Modules(object):
         self.cfg_files = cfg_files
         # Created on first use
         self._cached_cfg = None
+        if reporter is None:
+            reporter = reporting.ReportEventStack(
+                name="module-reporter", description="module-desc",
+                reporting_enabled=False)
         self.reporter = reporter
 
     @property
-- 
cgit v1.2.3


From 0550f95ccb31c9cad18c4a5851eeca53e371dd6b Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 4 Aug 2015 21:52:46 -0500
Subject: sync to 2.0 review @ patchset 4

---
 cloudinit/reporting/__init__.py | 56 +++++++++++++++++++++++++++++++++++------
 1 file changed, 49 insertions(+), 7 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index 78dde715..2b92ab58 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -22,6 +22,7 @@ DEFAULT_CONFIG = {
 
 instantiated_handler_registry = DictRegistry()
 
+
 class _nameset(set):
     def __getattr__(self, name):
         if name in self:
@@ -107,6 +108,36 @@ def report_start_event(event_name, event_description):
 
 
 class ReportEventStack(object):
+    """Context Manager for using :py:func:`report_event`
+
+    This enables calling :py:func:`report_start_event` and
+    :py:func:`report_finish_event` through a context manager.
+
+    :param name:
+        the name of the event
+
+    :param description:
+        the event's description, passed on to :py:func:`report_start_event`
+
+    :param message:
+        the description to use for the finish event. defaults to
+        :param:description.
+
+    :param parent:
+    :type parent: :py:class:ReportEventStack or None
+        The parent of this event.  The parent is populated with
+        results of all its children.  The name used in reporting
+        is <parent.name>/<name>
+
+    :param reporting_enabled:
+        Indicates if reporting events should be generated.
+        If not provided, defaults to the parent's value, or True if no parent
+        is provided.
+
+    :param result_on_exception:
+        The result value to set if an exception is caught. default
+        value is FAIL.
+    """
     def __init__(self, name, description, message=None, parent=None,
                  reporting_enabled=None, result_on_exception=status.FAIL):
         self.parent = parent
@@ -131,7 +162,8 @@ class ReportEventStack(object):
         self.children = {}
 
     def __repr__(self):
-        return ("%s reporting=%s" % (self.fullname, self.reporting_enabled))
+        return ("ReportEventStack(%s, %s, reporting_enabled=%s)" %
+                (self.name, self.description, self.reporting_enabled))
 
     def __enter__(self):
         self.result = status.SUCCESS
@@ -141,13 +173,23 @@ class ReportEventStack(object):
             self.parent.children[self.name] = (None, None)
         return self
 
-    def childrens_finish_info(self):
+    def _childrens_finish_info(self):
         for cand_result in (status.FAIL, status.WARN):
             for name, (value, msg) in self.children.items():
                 if value == cand_result:
-                    return (value, "[" + name + "]" + msg)
+                    return (value, self.message)
         return (self.result, self.message)
 
+    @property
+    def result(self):
+        return self._result
+
+    @result.setter
+    def result(self, value):
+        if value not in status:
+            raise ValueError("'%s' not a valid result" % value)
+        self._result = value
+
     @property
     def message(self):
         if self._message is not None:
@@ -157,15 +199,15 @@ class ReportEventStack(object):
     @message.setter
     def message(self, value):
         self._message = value
-       
-    def finish_info(self, exc):
+
+    def _finish_info(self, exc):
         # return tuple of description, and value
         if exc:
             return (self.result_on_exception, self.message)
-        return self.childrens_finish_info()
+        return self._childrens_finish_info()
 
     def __exit__(self, exc_type, exc_value, traceback):
-        (result, msg) = self.finish_info(exc_value)
+        (result, msg) = self._finish_info(exc_value)
         if self.parent:
             self.parent.children[self.name] = (result, msg)
         if self.reporting_enabled:
-- 
cgit v1.2.3


From 328cc7fbaf4d60b51193fb8c14e52d8c6f3273f2 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 4 Aug 2015 21:57:57 -0500
Subject: pep8 fixes

---
 cloudinit/config/cc_rh_subscription.py    | 6 +++---
 cloudinit/config/cc_rsyslog.py            | 1 +
 cloudinit/sources/DataSourceCloudStack.py | 6 ++++--
 3 files changed, 8 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 6da26d25..3b30c47e 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -130,9 +130,9 @@ class SubscriptionManager(object):
                 ((not self.auto_attach)
                  or (util.is_false(str(self.auto_attach)))):
 
-            no_auto = "The service-level key must be used in conjunction with "\
-                      "the auto-attach key.  Please re-run with auto-attach: "\
-                      "True"
+            no_auto = ("The service-level key must be used in conjunction "
+                       "with the auto-attach key.  Please re-run with "
+                       "auto-attach: True")
             return False, no_auto
         return True, None
 
diff --git a/cloudinit/config/cc_rsyslog.py b/cloudinit/config/cc_rsyslog.py
index a0132d28..b8642d65 100644
--- a/cloudinit/config/cc_rsyslog.py
+++ b/cloudinit/config/cc_rsyslog.py
@@ -130,6 +130,7 @@ HOST_PORT_RE = re.compile(
     '(([[](?P<bracket_addr>[^\]]*)[\]])|(?P<addr>[^:]*))'
     '([:](?P<port>[0-9]+))?$')
 
+
 def reload_syslog(command=DEF_RELOAD, systemd=False):
     service = 'rsyslog'
     if command == DEF_RELOAD:
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index d0cac5bb..64595020 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -41,10 +41,12 @@ class CloudStackPasswordServerClient(object):
     """
     Implements password fetching from the CloudStack password server.
 
-    http://cloudstack-administration.readthedocs.org/en/latest/templates.html#adding-password-management-to-your-templates
+    http://cloudstack-administration.readthedocs.org/
+       en/latest/templates.html#adding-password-management-to-your-templates
     has documentation about the system.  This implementation is following that
     found at
-    https://github.com/shankerbalan/cloudstack-scripts/blob/master/cloud-set-guest-password-debian
+    https://github.com/shankerbalan/cloudstack-scripts/
+       blob/master/cloud-set-guest-password-debian
     """
 
     def __init__(self, virtual_router_address):
-- 
cgit v1.2.3


From 5585b397cfb4ba397e9cfba3d86e3d10af20eb71 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 4 Aug 2015 22:01:27 -0500
Subject: fix pep8

---
 bin/cloud-init                    |  3 ++-
 cloudinit/reporting/handlers.py   |  7 -------
 cloudinit/sources/__init__.py     |  2 +-
 cloudinit/stages.py               | 10 ++++++----
 tests/unittests/test_reporting.py |  6 ++++--
 5 files changed, 13 insertions(+), 15 deletions(-)

(limited to 'cloudinit')

diff --git a/bin/cloud-init b/bin/cloud-init
index 51253c42..40cdbb06 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -628,7 +628,8 @@ def main():
         if args.local:
             rname, rdesc = ("init-local", "searching for local datasources")
         else:
-            rname, rdesc = ("init-network", "searching for network datasources")
+            rname, rdesc = ("init-network",
+                            "searching for network datasources")
     elif name == "modules":
         rname, rdesc = ("modules-%s" % args.mode,
                         "running modules for %s" % args.mode)
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 1d5ca524..be323f53 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -21,12 +21,5 @@ class LogHandler(ReportingHandler):
         logger.info(event.as_string())
 
 
-class StderrHandler(ReportingHandler):
-    def publish_event(self, event):
-        #sys.stderr.write(event.as_string() + "\n")
-        print(event.as_string())
-
-
 available_handlers = DictRegistry()
 available_handlers.register_item('log', LogHandler)
-available_handlers.register_item('print', StderrHandler)
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index cf50c1fb..838cd198 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -257,7 +257,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
         myrep = reporting.ReportEventStack(
             name="search-%s" % name.replace("DataSource", ""),
             description="searching for %s data from %s" % (mode, name),
-            message = "no %s data found from %s" % (mode, name),
+            message="no %s data found from %s" % (mode, name),
             parent=reporter)
         try:
             with myrep:
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 7b489b9f..d300709d 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -243,7 +243,8 @@ class Init(object):
             return self.datasource
 
         with reporting.ReportEventStack(
-                name="check-cache", description="attempting to read from cache",
+                name="check-cache",
+                description="attempting to read from cache",
                 parent=self.reporter) as myrep:
             ds = self._restore_from_cache()
             if ds:
@@ -708,17 +709,18 @@ class Modules(object):
                 # This name will affect the semaphore name created
                 run_name = "config-%s" % (name)
 
-                desc="running %s with frequency %s" % (run_name, freq)
+                desc = "running %s with frequency %s" % (run_name, freq)
                 myrep = reporting.ReportEventStack(
                     name=run_name, description=desc, parent=self.reporter)
 
                 with myrep:
-                    ran, _r = cc.run(run_name, mod.handle, func_args, freq=freq)
+                    ran, _r = cc.run(run_name, mod.handle, func_args,
+                                     freq=freq)
                     if ran:
                         myrep.message = "%s ran successfully" % run_name
                     else:
                         myrep.message = "%s previously ran" % run_name
-                    
+
             except Exception as e:
                 util.logexc(LOG, "Running module %s (%s) failed", name, mod)
                 failures.append((name, e))
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index 4f4cf3a4..ddfac541 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -51,7 +51,8 @@ class TestReportFinishEvent(TestCase):
             self, instantiated_handler_registry):
         event_name, event_description = self._report_finish_event()
         expected_string_representation = ': '.join(
-            ['finish', event_name, reporting.status.SUCCESS, event_description])
+            ['finish', event_name, reporting.status.SUCCESS,
+             event_description])
         self.assertHandlersPassedObjectWithAsString(
             instantiated_handler_registry.registered_items,
             expected_string_representation)
@@ -63,7 +64,8 @@ class TestReportFinishEvent(TestCase):
         event_name, event_description = self._report_finish_event(
             result=reporting.status.SUCCESS)
         expected_string_representation = ': '.join(
-            ['finish', event_name, reporting.status.SUCCESS, event_description])
+            ['finish', event_name, reporting.status.SUCCESS,
+             event_description])
         self.assertHandlersPassedObjectWithAsString(
             instantiated_handler_registry.registered_items,
             expected_string_representation)
-- 
cgit v1.2.3


From 6fdb23b6cbc8de14ebcffc17e9e49342b7bf193d Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 6 Aug 2015 18:19:46 -0500
Subject: sync with cloudinit 2.0 for registry and reporting

---
 cloudinit/registry.py             | 14 +++++++++++
 cloudinit/reporting/__init__.py   | 29 +++++++++++++++++++---
 cloudinit/reporting/handlers.py   | 15 +++++++++++-
 tests/unittests/test_reporting.py | 51 ++++++++++++++++++++++++++++++++-------
 4 files changed, 95 insertions(+), 14 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/registry.py b/cloudinit/registry.py
index 46cf0585..04368ddf 100644
--- a/cloudinit/registry.py
+++ b/cloudinit/registry.py
@@ -1,3 +1,7 @@
+# Copyright 2015 Canonical Ltd.
+# This file is part of cloud-init.  See LICENCE file for license information.
+#
+# vi: ts=4 expandtab
 import copy
 
 
@@ -5,6 +9,9 @@ class DictRegistry(object):
     """A simple registry for a mapping of objects."""
 
     def __init__(self):
+        self.reset()
+
+    def reset(self):
         self._items = {}
 
     def register_item(self, key, item):
@@ -14,6 +21,13 @@ class DictRegistry(object):
                 'Item already registered with key {0}'.format(key))
         self._items[key] = item
 
+    def unregister_item(self, key, force=True):
+        """Remove item from the registry."""
+        if key in self._items:
+            del self._items[key]
+        elif not force:
+            raise KeyError("%s: key not present to unregister" % key)
+
     @property
     def registered_items(self):
         """All the items that have been registered.
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index 2b92ab58..d0bc14e3 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -20,8 +20,6 @@ DEFAULT_CONFIG = {
     'logging': {'type': 'log'},
 }
 
-instantiated_handler_registry = DictRegistry()
-
 
 class _nameset(set):
     def __getattr__(self, name):
@@ -46,6 +44,11 @@ class ReportingEvent(object):
         return '{0}: {1}: {2}'.format(
             self.event_type, self.name, self.description)
 
+    def as_dict(self):
+        """The event represented as a dictionary."""
+        return {'name': self.name, 'description': self.description,
+                'event_type': self.event_type}
+
 
 class FinishReportingEvent(ReportingEvent):
 
@@ -60,9 +63,26 @@ class FinishReportingEvent(ReportingEvent):
         return '{0}: {1}: {2}: {3}'.format(
             self.event_type, self.name, self.result, self.description)
 
+    def as_dict(self):
+        """The event represented as json friendly."""
+        data = super(FinishReportingEvent, self).as_dict()
+        data['result'] = self.result
+        return data
+
 
-def add_configuration(config):
+def update_configuration(config):
+    """Update the instanciated_handler_registry.
+
+    :param config:
+        The dictionary containing changes to apply.  If a key is given
+        with a False-ish value, the registered handler matching that name
+        will be unregistered.
+    """
     for handler_name, handler_config in config.items():
+        if not handler_config:
+            instantiated_handler_registry.unregister_item(
+                handler_name, force=True)
+            continue
         handler_config = handler_config.copy()
         cls = available_handlers.registered_items[handler_config.pop('type')]
         instance = cls(**handler_config)
@@ -214,4 +234,5 @@ class ReportEventStack(object):
             report_finish_event(self.fullname, msg, result)
 
 
-add_configuration(DEFAULT_CONFIG)
+instantiated_handler_registry = DictRegistry()
+update_configuration(DEFAULT_CONFIG)
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index be323f53..86cbe3c3 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -1,14 +1,27 @@
+# vi: ts=4 expandtab
+
 import abc
 import logging
+import oauthlib.oauth1 as oauth1
+
+import six
 
 from cloudinit.registry import DictRegistry
+from cloudinit import url_helper
+from cloudinit import util
 
 
+@six.add_metaclass(abc.ABCMeta)
 class ReportingHandler(object):
+    """Base class for report handlers.
+
+    Implement :meth:`~publish_event` for controlling what
+    the handler does with an event.
+    """
 
     @abc.abstractmethod
     def publish_event(self, event):
-        raise NotImplementedError
+        """Publish an event to the ``INFO`` log level."""
 
 
 class LogHandler(ReportingHandler):
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index ffeb55d2..1a4ee8c4 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -4,6 +4,7 @@
 # vi: ts=4 expandtab
 
 from cloudinit import reporting
+from cloudinit.reporting import handlers
 
 from .helpers import (mock, TestCase)
 
@@ -95,13 +96,29 @@ class TestReportingEvent(TestCase):
             [event_type, name, description])
         self.assertEqual(expected_string_representation, event.as_string())
 
+    def test_as_dict(self):
+        event_type, name, desc = 'test_type', 'test_name', 'test_desc'
+        event = reporting.ReportingEvent(event_type, name, desc)
+        self.assertEqual(
+            {'event_type': event_type, 'name': name, 'description': desc},
+            event.as_dict())
+
+
+class TestFinishReportingEvent(TestCase):
+    def test_as_has_result(self):
+        result = reporting.status.SUCCESS
+        name, desc = 'test_name', 'test_desc'
+        event = reporting.FinishReportingEvent(name, desc, result)
+        ret = event.as_dict()
+        self.assertTrue('result' in ret)
+        self.assertEqual(ret['result'], result)
+
 
-class TestReportingHandler(TestCase):
+class TestBaseReportingHandler(TestCase):
 
-    def test_no_default_publish_event_implementation(self):
-        self.assertRaises(NotImplementedError,
-                          reporting.handlers.ReportingHandler().publish_event,
-                          None)
+    def test_base_reporting_handler_is_abstract(self):
+        regexp = r".*abstract.*publish_event.*"
+        self.assertRaisesRegexp(TypeError, regexp, handlers.ReportingHandler)
 
 
 class TestLogHandler(TestCase):
@@ -147,7 +164,7 @@ class TestReportingConfiguration(TestCase):
     @mock.patch.object(reporting, 'instantiated_handler_registry')
     def test_empty_configuration_doesnt_add_handlers(
             self, instantiated_handler_registry):
-        reporting.add_configuration({})
+        reporting.update_configuration({})
         self.assertEqual(
             0, instantiated_handler_registry.register_item.call_count)
 
@@ -159,7 +176,7 @@ class TestReportingConfiguration(TestCase):
         handler_cls = mock.Mock()
         available_handlers.registered_items = {handler_type_name: handler_cls}
         handler_name = 'my_test_handler'
-        reporting.add_configuration(
+        reporting.update_configuration(
             {handler_name: {'type': handler_type_name}})
         self.assertEqual(
             {handler_name: handler_cls.return_value},
@@ -177,7 +194,7 @@ class TestReportingConfiguration(TestCase):
         handler_config = extra_kwargs.copy()
         handler_config.update({'type': handler_type_name})
         handler_name = 'my_test_handler'
-        reporting.add_configuration({handler_name: handler_config})
+        reporting.update_configuration({handler_name: handler_config})
         self.assertEqual(
             handler_cls.return_value,
             reporting.instantiated_handler_registry.registered_items[
@@ -194,9 +211,25 @@ class TestReportingConfiguration(TestCase):
         available_handlers.registered_items = {handler_type_name: handler_cls}
         handler_config = {'type': handler_type_name, 'foo': 'bar'}
         expected_handler_config = handler_config.copy()
-        reporting.add_configuration({'my_test_handler': handler_config})
+        reporting.update_configuration({'my_test_handler': handler_config})
         self.assertEqual(expected_handler_config, handler_config)
 
+    @mock.patch.object(
+        reporting, 'instantiated_handler_registry', reporting.DictRegistry())
+    @mock.patch.object(reporting, 'available_handlers')
+    def test_handlers_removed_if_falseish_specified(self, available_handlers):
+        handler_type_name = 'test_handler'
+        handler_cls = mock.Mock()
+        available_handlers.registered_items = {handler_type_name: handler_cls}
+        handler_name = 'my_test_handler'
+        reporting.update_configuration(
+            {handler_name: {'type': handler_type_name}})
+        self.assertEqual(
+            1, len(reporting.instantiated_handler_registry.registered_items))
+        reporting.update_configuration({handler_name: None})
+        self.assertEqual(
+            0, len(reporting.instantiated_handler_registry.registered_items))
+
 
 class TestReportingEventStack(TestCase):
     @mock.patch('cloudinit.reporting.report_finish_event')
-- 
cgit v1.2.3


From ebd393e56ba21f8a84571dff499e6d6fb6852042 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 6 Aug 2015 18:34:57 -0500
Subject: tests pass

---
 bin/cloud-init                      |  10 +++
 cloudinit/reporting/handlers.py     |  28 +++++++
 cloudinit/sources/DataSourceMAAS.py |  88 +++++-----------------
 cloudinit/url_helper.py             | 142 ++++++++++++++++++++++++++++++++++--
 cloudinit/util.py                   |   3 +-
 5 files changed, 196 insertions(+), 75 deletions(-)

(limited to 'cloudinit')

diff --git a/bin/cloud-init b/bin/cloud-init
index 40cdbb06..ad2e624a 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -137,6 +137,11 @@ def run_module_section(mods, action_name, section):
         return failures
 
 
+def apply_reporting_cfg(cfg):
+    reporting.reset_configuration()
+    reporting.update_configuration(cfg.get('reporting', {}))
+
+
 def main_init(name, args):
     deps = [sources.DEP_FILESYSTEM, sources.DEP_NETWORK]
     if args.local:
@@ -191,6 +196,7 @@ def main_init(name, args):
                     " longer be active shortly"))
         logging.resetLogging()
     logging.setupLogging(init.cfg)
+    apply_reporting_cfg(init.cfg)
 
     # Any log usage prior to setupLogging above did not have local user log
     # config applied.  We send the welcome message now, as stderr/out have
@@ -283,6 +289,8 @@ def main_init(name, args):
         util.logexc(LOG, "Consuming user data failed!")
         return (init.datasource, ["Consuming user data failed!"])
 
+    apply_reporting_cfg(init.cfg)
+
     # Stage 8 - re-read and apply relevant cloud-config to include user-data
     mods = stages.Modules(init, extract_fns(args), reporter=args.reporter)
     # Stage 9
@@ -343,6 +351,7 @@ def main_modules(action_name, args):
                     " longer be active shortly"))
         logging.resetLogging()
     logging.setupLogging(mods.cfg)
+    apply_reporting_cfg(init.cfg)
 
     # now that logging is setup and stdout redirected, send welcome
     welcome(name, msg=w_msg)
@@ -405,6 +414,7 @@ def main_single(name, args):
                    " longer be active shortly"))
         logging.resetLogging()
     logging.setupLogging(mods.cfg)
+    apply_reporting_cfg(init.cfg)
 
     # now that logging is setup and stdout redirected, send welcome
     welcome(name, msg=w_msg)
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 86cbe3c3..d8f69641 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -34,5 +34,33 @@ class LogHandler(ReportingHandler):
         logger.info(event.as_string())
 
 
+class WebHookHandler(ReportingHandler):
+    def __init__(self, endpoint, consumer_key=None, token_key=None,
+                 token_secret=None, consumer_secret=None, timeout=None,
+                 retries=None):
+        super(WebHookHandler, self).__init__()
+
+        if any([consumer_key, token_key, token_secret, consumer_secret]):
+            self.oauth_helper = url_helper.OauthHelper(
+                consumer_key=consumer_key, token_key=token_key,
+                token_secret=token_secret, consumer_secret=consumer_secret)
+        else:
+            self.oauth_helper = None
+        self.endpoint = endpoint
+        self.timeout = timeout
+        self.retries = retries
+        self.ssl_details = util.fetch_ssl_details()
+
+    def publish_event(self, event):
+        if self.oauth_helper:
+            readurl = self.oauth_helper.readurl
+        else:
+            readurl = url_helper.readurl
+        return readurl(
+            self.endpoint, data=event.as_dict(),
+            timeout=self.timeout,
+            retries=self.retries, ssl_details=self.ssl_details)
+
+
 available_handlers = DictRegistry()
 available_handlers.register_item('log', LogHandler)
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index c1a0eb61..279da238 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -52,7 +52,20 @@ class DataSourceMAAS(sources.DataSource):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.base_url = None
         self.seed_dir = os.path.join(paths.seed_dir, 'maas')
-        self.oauth_clockskew = None
+        self.oauth_helper = self._get_helper()
+
+    def _get_helper(self):
+        mcfg = self.ds_cfg
+        # If we are missing token_key, token_secret or consumer_key
+        # then just do non-authed requests
+        for required in ('token_key', 'token_secret', 'consumer_key'):
+            if required not in mcfg:
+                return url_helper.OauthUrlHelper()
+
+        return url_helper.OauthUrlHelper(
+            consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'],
+            token_secret=mcfg['token_secret'],
+            consumer_secret=mcfg.get('consumer_secret'))
 
     def __str__(self):
         root = sources.DataSource.__str__(self)
@@ -84,9 +97,9 @@ class DataSourceMAAS(sources.DataSource):
 
             self.base_url = url
 
-            (userdata, metadata) = read_maas_seed_url(self.base_url,
-                                                      self._md_headers,
-                                                      paths=self.paths)
+            (userdata, metadata) = read_maas_seed_url(
+                self.base_url, self.oauth_helper.md_headers,
+                paths=self.paths)
             self.userdata_raw = userdata
             self.metadata = metadata
             return True
@@ -94,31 +107,8 @@ class DataSourceMAAS(sources.DataSource):
             util.logexc(LOG, "Failed fetching metadata from url %s", url)
             return False
 
-    def _md_headers(self, url):
-        mcfg = self.ds_cfg
-
-        # If we are missing token_key, token_secret or consumer_key
-        # then just do non-authed requests
-        for required in ('token_key', 'token_secret', 'consumer_key'):
-            if required not in mcfg:
-                return {}
-
-        consumer_secret = mcfg.get('consumer_secret', "")
-
-        timestamp = None
-        if self.oauth_clockskew:
-            timestamp = int(time.time()) + self.oauth_clockskew
-
-        return oauth_headers(url=url,
-                             consumer_key=mcfg['consumer_key'],
-                             token_key=mcfg['token_key'],
-                             token_secret=mcfg['token_secret'],
-                             consumer_secret=consumer_secret,
-                             timestamp=timestamp)
-
     def wait_for_metadata_service(self, url):
         mcfg = self.ds_cfg
-
         max_wait = 120
         try:
             max_wait = int(mcfg.get("max_wait", max_wait))
@@ -138,10 +128,8 @@ class DataSourceMAAS(sources.DataSource):
         starttime = time.time()
         check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
         urls = [check_url]
-        url = url_helper.wait_for_url(urls=urls, max_wait=max_wait,
-                                      timeout=timeout,
-                                      exception_cb=self._except_cb,
-                                      headers_cb=self._md_headers)
+        url = self.oauth_helper.wait_for_url(
+            urls=urls, max_wait=max_wait, timeout=timeout)
 
         if url:
             LOG.debug("Using metadata source: '%s'", url)
@@ -151,26 +139,6 @@ class DataSourceMAAS(sources.DataSource):
 
         return bool(url)
 
-    def _except_cb(self, msg, exception):
-        if not (isinstance(exception, url_helper.UrlError) and
-                (exception.code == 403 or exception.code == 401)):
-            return
-
-        if 'date' not in exception.headers:
-            LOG.warn("Missing header 'date' in %s response", exception.code)
-            return
-
-        date = exception.headers['date']
-        try:
-            ret_time = time.mktime(parsedate(date))
-        except Exception as e:
-            LOG.warn("Failed to convert datetime '%s': %s", date, e)
-            return
-
-        self.oauth_clockskew = int(ret_time - time.time())
-        LOG.warn("Setting oauth clockskew to %d", self.oauth_clockskew)
-        return
-
 
 def read_maas_seed_dir(seed_d):
     """
@@ -280,24 +248,6 @@ def check_seed_contents(content, seed):
     return (userdata, md)
 
 
-def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
-                  timestamp=None):
-    if timestamp:
-        timestamp = str(timestamp)
-    else:
-        timestamp = None
-
-    client = oauth1.Client(
-        consumer_key,
-        client_secret=consumer_secret,
-        resource_owner_key=token_key,
-        resource_owner_secret=token_secret,
-        signature_method=oauth1.SIGNATURE_PLAINTEXT,
-        timestamp=timestamp)
-    uri, signed_headers, body = client.sign(url)
-    return signed_headers
-
-
 class MAASSeedDirNone(Exception):
     pass
 
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 0e65f431..2141cdc5 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -25,6 +25,10 @@ import time
 import six
 
 import requests
+import oauthlib.oauth1 as oauth1
+import os
+import json
+from functools import partial
 from requests import exceptions
 
 from six.moves.urllib.parse import (
@@ -147,13 +151,14 @@ class UrlResponse(object):
 
 
 class UrlError(IOError):
-    def __init__(self, cause, code=None, headers=None):
+    def __init__(self, cause, code=None, headers=None, url=None):
         IOError.__init__(self, str(cause))
         self.cause = cause
         self.code = code
         self.headers = headers
         if self.headers is None:
             self.headers = {}
+        self.url = url
 
 
 def _get_ssl_args(url, ssl_details):
@@ -247,9 +252,10 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
                     and hasattr(e, 'response')  # This appeared in v 0.10.8
                     and hasattr(e.response, 'status_code')):
                 excps.append(UrlError(e, code=e.response.status_code,
-                                      headers=e.response.headers))
+                                      headers=e.response.headers,
+                                      url=url))
             else:
-                excps.append(UrlError(e))
+                excps.append(UrlError(e, url=url))
                 if SSL_ENABLED and isinstance(e, exceptions.SSLError):
                     # ssl exceptions are not going to get fixed by waiting a
                     # few seconds
@@ -333,11 +339,11 @@ def wait_for_url(urls, max_wait=None, timeout=None,
                 if not response.contents:
                     reason = "empty response [%s]" % (response.code)
                     url_exc = UrlError(ValueError(reason), code=response.code,
-                                       headers=response.headers)
+                                       headers=response.headers, url=url)
                 elif not response.ok():
                     reason = "bad status code [%s]" % (response.code)
                     url_exc = UrlError(ValueError(reason), code=response.code,
-                                       headers=response.headers)
+                                       headers=response.headers, url=url)
                 else:
                     return url
             except UrlError as e:
@@ -368,3 +374,129 @@ def wait_for_url(urls, max_wait=None, timeout=None,
         time.sleep(sleep_time)
 
     return False
+
+
+class OauthUrlHelper(object):
+    def __init__(self, consumer_key=None, token_key=None,
+                 token_secret=None, consumer_secret=None,
+                 skew_data_file="/run/oauth_skew.json"):
+        self.consumer_key = consumer_key
+        self.consumer_secret = consumer_secret or ""
+        self.token_key = token_key
+        self.token_secret = token_secret
+        self.skew_data_file = skew_data_file
+        self.skew_data = {}
+        self._do_oauth = True
+        self.skew_change_limit = 5
+        required = (self.token_key, self.token_secret, self.consumer_key)
+        if not any(required):
+            self._do_oauth = False
+        elif not all(required):
+            raise ValueError("all or none of token_key, token_secret, or "
+                             "consumer_key can be set")
+
+        self.skew_data = self.read_skew_file()
+
+    def read_skew_file(self):
+        if self.skew_data_file and os.path.isfile(self.skew_data_file):
+            with open(self.skew_data_file, mode="r") as fp:
+                return json.load(fp.read())
+        return None
+
+    def update_skew_file(self, host, value):
+        # this is not atomic
+        cur = self.read_skew_file()
+        if cur is None or not self.skew_data_file:
+            return
+        cur[host] = value
+        with open(self.skew_data_file, mode="w") as fp:
+            fp.write(json.dumps(cur))
+
+    def exception_cb(self, msg, exception):
+        if not (isinstance(exception, UrlError) and
+                (exception.code == 403 or exception.code == 401)):
+            return
+
+        if 'date' not in exception.headers:
+            LOG.warn("Missing header 'date' in %s response", exception.code)
+            return
+
+        date = exception.headers['date']
+        try:
+            ret_time = time.mktime(parsedate(date))
+        except Exception as e:
+            LOG.warn("Failed to convert datetime '%s': %s", date, e)
+            return
+
+        host = urlparse(exception.url).netloc
+        skew = int(ret_time - time.time())
+        old_skew = self.skew_data.get(host)
+        if abs(old_skew - skew) > self.skew_change_limit:
+            self.update_skew_file(host, skew)
+            LOG.warn("Setting oauth clockskew for %s to %d",
+                     host, skew)
+        skew_data[host] = skew
+
+        return
+
+    def headers_cb(self, url):
+        if not self._do_oauth:
+            return {}
+
+        timestamp = None
+        host = urlparse(url).netloc
+        if host in self.skew_data:
+            timestamp = int(time.time()) + self.skew_data[host]
+
+        return oauth_headers(
+            url=url, consumer_key=self.consumer_key,
+            token_key=self.token_key, token_secret=self.token_secret,
+            consumer_secret=self.consumer_secret, timestamp=timestamp)
+
+    def _wrapped(self, wrapped_func, args, kwargs):
+        kwargs['headers_cb'] = partial(
+            self._headers_cb, kwargs.get('headers_cb'))
+        kwargs['exception_cb'] = partial(
+            self._exception_cb, kwargs.get('exception_cb'))
+        return wrapped_func(*args, **kwargs)
+
+    def wait_for_url(self, *args, **kwargs):
+        return self._wrapped(wait_for_url, args, kwargs)
+
+    def readurl(self, *args, **kwargs):
+        return self._wrapped(readurl, args, kwargs)
+
+    def _exception_cb(self, extra_exception_cb, url, msg, exception):
+        ret = None
+        try:
+            if extra_exception_cb:
+                ret = extra_exception_cb(msg, exception)
+        finally:
+                self.exception_cb(self, msg, exception)
+        return ret
+
+    def _headers_cb(self, extra_headers_cb, url):
+        headers = {}
+        if extra_headers_cb:
+            headers = extra_headers_cb(url)
+        if headers:
+            headers.update(self.headers_cb(url))
+        return headers
+
+
+def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
+                  timestamp=None):
+    if timestamp:
+        timestamp = str(timestamp)
+    else:
+        timestamp = None
+
+    client = oauth1.Client(
+        consumer_key,
+        client_secret=consumer_secret,
+        resource_owner_key=token_key,
+        resource_owner_secret=token_secret,
+        signature_method=oauth1.SIGNATURE_PLAINTEXT,
+        timestamp=timestamp)
+    uri, signed_headers, body = client.sign(url)
+    return signed_headers
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 02ba654a..09e583f5 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -782,7 +782,8 @@ def read_file_or_url(url, timeout=5, retries=10,
             code = e.errno
             if e.errno == errno.ENOENT:
                 code = url_helper.NOT_FOUND
-            raise url_helper.UrlError(cause=e, code=code, headers=None)
+            raise url_helper.UrlError(cause=e, code=code, headers=None,
+                                      url=url)
         return url_helper.FileResponse(file_path, contents=contents)
     else:
         return url_helper.readurl(url,
-- 
cgit v1.2.3


From fc5fc6e476059327d4063f165170cdde01db4100 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 6 Aug 2015 23:51:17 -0500
Subject: add the webhook handler

---
 bin/cloud-init                  |  6 +++---
 cloudinit/reporting/__init__.py | 11 +++++++++--
 cloudinit/reporting/handlers.py | 15 +++++++++++----
 3 files changed, 23 insertions(+), 9 deletions(-)

(limited to 'cloudinit')

diff --git a/bin/cloud-init b/bin/cloud-init
index ad2e624a..86780408 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -138,8 +138,8 @@ def run_module_section(mods, action_name, section):
 
 
 def apply_reporting_cfg(cfg):
-    reporting.reset_configuration()
-    reporting.update_configuration(cfg.get('reporting'), {})
+    if cfg.get('reporting'):
+        reporting.update_configuration(cfg.get('reporting'))
 
 
 def main_init(name, args):
@@ -648,7 +648,7 @@ def main():
                         "running single module %s" % args.name)
         report_on = args.report
 
-    reporting.add_configuration({'print': {'type': 'print'}})
+    reporting.update_configuration({'print': {'type': 'print'}})
     args.reporter = reporting.ReportEventStack(
         rname, rdesc, reporting_enabled=report_on)
     with args.reporter:
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index d0bc14e3..b9d4f679 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -9,8 +9,8 @@ The reporting framework is intended to allow all parts of cloud-init to
 report events in a structured manner.
 """
 
-from cloudinit.registry import DictRegistry
-from cloudinit.reporting.handlers import available_handlers
+from ..registry import DictRegistry
+from ..reporting.handlers import available_handlers
 
 
 FINISH_EVENT_TYPE = 'finish'
@@ -18,6 +18,7 @@ START_EVENT_TYPE = 'start'
 
 DEFAULT_CONFIG = {
     'logging': {'type': 'log'},
+    'print': {'type': 'print'},
 }
 
 
@@ -83,8 +84,14 @@ def update_configuration(config):
             instantiated_handler_registry.unregister_item(
                 handler_name, force=True)
             continue
+        registered = instantiated_handler_registry.registered_items
         handler_config = handler_config.copy()
         cls = available_handlers.registered_items[handler_config.pop('type')]
+        if (handler_name in registered and
+                (registered[handler_name] == handler_config)):
+            continue
+        else:
+            instantiated_handler_registry.unregister_item(handler_name)
         instance = cls(**handler_config)
         instantiated_handler_registry.register_item(handler_name, instance)
 
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index d8f69641..a962edae 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -6,9 +6,8 @@ import oauthlib.oauth1 as oauth1
 
 import six
 
-from cloudinit.registry import DictRegistry
-from cloudinit import url_helper
-from cloudinit import util
+from ..registry import DictRegistry
+from .. import (url_helper, util)
 
 
 @six.add_metaclass(abc.ABCMeta)
@@ -34,13 +33,19 @@ class LogHandler(ReportingHandler):
         logger.info(event.as_string())
 
 
+class PrintHandler(ReportingHandler):
+    def publish_event(self, event):
+        """Publish an event to the ``INFO`` log level."""
+        print(event.as_string())
+
+
 class WebHookHandler(ReportingHandler):
     def __init__(self, endpoint, consumer_key=None, token_key=None,
                  token_secret=None, consumer_secret=None, timeout=None,
                  retries=None):
         super(WebHookHandler, self).__init__()
 
-        if any(consumer_key, token_key, token_secret, consumer_secret):
+        if any([consumer_key, token_key, token_secret, consumer_secret]):
             self.oauth_helper = url_helper.OauthHelper(
                 consumer_key=consumer_key, token_key=token_key,
                 token_secret=token_secret, consumer_secret=consumer_secret)
@@ -64,3 +69,5 @@ class WebHookHandler(ReportingHandler):
 
 available_handlers = DictRegistry()
 available_handlers.register_item('log', LogHandler)
+available_handlers.register_item('print', PrintHandler)
+available_handlers.register_item('webhook', WebHookHandler)
-- 
cgit v1.2.3


From 48cb8699efb5c6116dfa7b4d76d0a5fb6b3fbbbf Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 7 Aug 2015 00:22:49 -0500
Subject: hopefully fix DataSourceMAAS

---
 cloudinit/sources/DataSourceMAAS.py          | 58 +++++++++++-----------------
 tests/unittests/test_datasource/test_maas.py |  2 +-
 2 files changed, 24 insertions(+), 36 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 279da238..2f36bbe2 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -164,12 +164,12 @@ def read_maas_seed_dir(seed_d):
     return check_seed_contents(md, seed_d)
 
 
-def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
+def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
                        version=MD_VERSION, paths=None):
     """
     Read the maas datasource at seed_url.
-      - header_cb is a method that should return a headers dictionary for
-        a given url
+      read_file_or_url is a method that should provide an interface
+      like util.read_file_or_url
 
     Expected format of seed_url is are the following files:
       * <seed_url>/<version>/meta-data/instance-id
@@ -190,14 +190,12 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
         'user-data': "%s/%s" % (base_url, 'user-data'),
     }
 
+    if read_file_or_url is None:
+        read_file_or_url = util.read_file_or_url
+
     md = {}
     for name in file_order:
         url = files.get(name)
-        if not header_cb:
-            def _cb(url):
-                return {}
-            header_cb = _cb
-
         if name == 'user-data':
             retries = 0
         else:
@@ -205,10 +203,8 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
 
         try:
             ssl_details = util.fetch_ssl_details(paths)
-            resp = util.read_file_or_url(url, retries=retries,
-                                         headers_cb=header_cb,
-                                         timeout=timeout,
-                                         ssl_details=ssl_details)
+            resp = read_file_or_url(url, retries=retries,
+                                    timeout=timeout, ssl_details=ssl_details)
             if resp.ok():
                 if name in BINARY_FIELDS:
                     md[name] = resp.contents
@@ -311,47 +307,39 @@ if __name__ == "__main__":
                 if key in cfg and creds[key] is None:
                     creds[key] = cfg[key]
 
-        def geturl(url, headers_cb):
-            req = Request(url, data=None, headers=headers_cb(url))
-            return urlopen(req).read()
+        oauth_helper = url_helper.OauthUrlHelper(**creds)
+
+        def geturl(url):
+            return oauth_helper.readurl(url).contents
 
         def printurl(url, headers_cb):
-            print("== %s ==\n%s\n" % (url, geturl(url, headers_cb)))
+            print("== %s ==\n%s\n" % (url, geturl(url)))
 
-        def crawl(url, headers_cb=None):
+        def crawl(url):
             if url.endswith("/"):
-                for line in geturl(url, headers_cb).splitlines():
+                for line in geturl(url).splitlines():
                     if line.endswith("/"):
-                        crawl("%s%s" % (url, line), headers_cb)
+                        crawl("%s%s" % (url, line))
                     else:
-                        printurl("%s%s" % (url, line), headers_cb)
+                        printurl("%s%s" % (url, line))
             else:
-                printurl(url, headers_cb)
-
-        def my_headers(url):
-            headers = {}
-            if creds.get('consumer_key', None) is not None:
-                headers = oauth_headers(url, **creds)
-            return headers
+                printurl(url)
 
         if args.subcmd == "check-seed":
-            if args.url.startswith("http"):
-                (userdata, metadata) = read_maas_seed_url(args.url,
-                                                          header_cb=my_headers,
-                                                          version=args.apiver)
-            else:
-                (userdata, metadata) = read_maas_seed_url(args.url)
+            (userdata, metadata) = read_maas_seed_url(
+                args.url, read_file_or_url=oauth_helper.read_file_or_url,
+                version=args.apiver)
             print("=== userdata ===")
             print(userdata)
             print("=== metadata ===")
             pprint.pprint(metadata)
 
         elif args.subcmd == "get":
-            printurl(args.url, my_headers)
+            printurl(args.url)
 
         elif args.subcmd == "crawl":
             if not args.url.endswith("/"):
                 args.url = "%s/" % args.url
-            crawl(args.url, my_headers)
+            crawl(args.url)
 
     main()
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index f109bb04..eb97b692 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -141,7 +141,7 @@ class TestMAASDataSource(TestCase):
         with mock.patch.object(url_helper, 'readurl',
                                side_effect=side_effect()) as mockobj:
             userdata, metadata = DataSourceMAAS.read_maas_seed_url(
-                my_seed, header_cb=my_headers_cb, version=my_ver)
+                my_seed, version=my_ver)
 
             self.assertEqual(b"foodata", userdata)
             self.assertEqual(metadata['instance-id'],
-- 
cgit v1.2.3


From 89b381f01c727c8fb00724eb28bf98eafd97dbb4 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 7 Aug 2015 00:45:11 -0500
Subject: seems functional in test

---
 cloudinit/reporting/handlers.py |  2 +-
 cloudinit/url_helper.py         | 10 ++++------
 2 files changed, 5 insertions(+), 7 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index a962edae..eecd0a96 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -46,7 +46,7 @@ class WebHookHandler(ReportingHandler):
         super(WebHookHandler, self).__init__()
 
         if any([consumer_key, token_key, token_secret, consumer_secret]):
-            self.oauth_helper = url_helper.OauthHelper(
+            self.oauth_helper = url_helper.OauthUrlHelper(
                 consumer_key=consumer_key, token_key=token_key,
                 token_secret=token_secret, consumer_secret=consumer_secret)
         else:
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 2141cdc5..e598661f 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -385,7 +385,6 @@ class OauthUrlHelper(object):
         self.token_key = token_key
         self.token_secret = token_secret
         self.skew_data_file = skew_data_file
-        self.skew_data = {}
         self._do_oauth = True
         self.skew_change_limit = 5
         required = (self.token_key, self.token_secret, self.consumer_key)
@@ -445,7 +444,7 @@ class OauthUrlHelper(object):
 
         timestamp = None
         host = urlparse(url).netloc
-        if host in self.skew_data:
+        if self.skew_data and host in self.skew_data:
             timestamp = int(time.time()) + self.skew_data[host]
 
         return oauth_headers(
@@ -466,21 +465,20 @@ class OauthUrlHelper(object):
     def readurl(self, *args, **kwargs):
         return self._wrapped(readurl, args, kwargs)
 
-    def _exception_cb(self, extra_exception_cb, url, msg, exception):
+    def _exception_cb(self, extra_exception_cb, msg, exception):
         ret = None
         try:
             if extra_exception_cb:
                 ret = extra_exception_cb(msg, exception)
         finally:
-                self.exception_cb(self, msg, exception)
+                self.exception_cb(msg, exception)
         return ret
 
     def _headers_cb(self, extra_headers_cb, url):
         headers = {}
         if extra_headers_cb:
             headers = extra_headers_cb(url)
-        if headers:
-            headers.update(self.headers_cb(url))
+        headers.update(self.headers_cb(url))
         return headers
 
 
-- 
cgit v1.2.3


From 3c135e4b90ea55e85b9a9afd039acbb9fa672208 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 7 Aug 2015 00:56:37 -0500
Subject: improvements on skew

---
 cloudinit/url_helper.py | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index e598661f..81569e19 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -394,7 +394,8 @@ class OauthUrlHelper(object):
             raise ValueError("all or none of token_key, token_secret, or "
                              "consumer_key can be set")
 
-        self.skew_data = self.read_skew_file()
+        old = self.read_skew_file()
+        self.skew_data = old or {}
 
     def read_skew_file(self):
         if self.skew_data_file and os.path.isfile(self.skew_data_file):
@@ -404,9 +405,9 @@ class OauthUrlHelper(object):
 
     def update_skew_file(self, host, value):
         # this is not atomic
-        cur = self.read_skew_file()
-        if cur is None or not self.skew_data_file:
+        if not self.skew_data_file:
             return
+        cur = self.read_skew_file()
         cur[host] = value
         with open(self.skew_data_file, mode="w") as fp:
             fp.write(json.dumps(cur))
@@ -422,18 +423,17 @@ class OauthUrlHelper(object):
 
         date = exception.headers['date']
         try:
-            ret_time = time.mktime(parsedate(date))
+            remote_time = time.mktime(parsedate(date))
         except Exception as e:
             LOG.warn("Failed to convert datetime '%s': %s", date, e)
             return
 
+        skew = int(remote_time - time.time())
         host = urlparse(exception.url).netloc
-        skew = int(ret_time - time.time())
-        old_skew = self.skew_data.get(host)
-        if abs(old_skew - skew) > self.skew_change_limit:
+        old_skew = self.skew_data.get(host, 0)
+        if (abs(old_skew - skew) > self.skew_change_limit:
             self.update_skew_file(host, skew)
-            LOG.warn("Setting oauth clockskew for %s to %d",
-                     host, skew)
+            LOG.warn("Setting oauth clockskew for %s to %d", host, skew)
         skew_data[host] = skew
 
         return
-- 
cgit v1.2.3


From be2d965bcd2ebd58d41b790d6cc553d98a8234c4 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 7 Aug 2015 09:19:29 -0500
Subject: fix syntax

---
 cloudinit/url_helper.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 81569e19..dca4cc85 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -431,7 +431,7 @@ class OauthUrlHelper(object):
         skew = int(remote_time - time.time())
         host = urlparse(exception.url).netloc
         old_skew = self.skew_data.get(host, 0)
-        if (abs(old_skew - skew) > self.skew_change_limit:
+        if abs(old_skew - skew) > self.skew_change_limit:
             self.update_skew_file(host, skew)
             LOG.warn("Setting oauth clockskew for %s to %d", host, skew)
         skew_data[host] = skew
-- 
cgit v1.2.3


From 53f35028af55b06c19f409d6081aa766607f22a8 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 7 Aug 2015 10:15:10 -0500
Subject: catch exception in webhook, adjust logging to use cloud-init logging

---
 cloudinit/reporting/handlers.py | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index eecd0a96..9cf8bd2b 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -1,13 +1,15 @@
 # vi: ts=4 expandtab
 
 import abc
-import logging
 import oauthlib.oauth1 as oauth1
-
 import six
 
 from ..registry import DictRegistry
 from .. import (url_helper, util)
+from .. import log as logging
+
+
+LOG = logging.getLogger(__name__)
 
 
 @six.add_metaclass(abc.ABCMeta)
@@ -61,10 +63,13 @@ class WebHookHandler(ReportingHandler):
             readurl = self.oauth_helper.readurl
         else:
             readurl = url_helper.readurl
-        return readurl(
-            self.endpoint, data=event.as_dict(),
-            timeout=self.timeout,
-            retries=self.retries, ssl_details=self.ssl_details)
+        try:
+            return readurl(
+                self.endpoint, data=event.as_dict(),
+                timeout=self.timeout,
+                retries=self.retries, ssl_details=self.ssl_details)
+        except:
+            LOG.warn("failed posting event: %s" % event.as_string())
 
 
 available_handlers = DictRegistry()
-- 
cgit v1.2.3


From 71c8fedcd581d8c4aa937d270f5bbd2e5af99e26 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 7 Aug 2015 10:20:34 -0500
Subject: undo broken logic that attempted to not re-initialize classes

---
 cloudinit/reporting/__init__.py | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index b9d4f679..a3b8332f 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -87,11 +87,7 @@ def update_configuration(config):
         registered = instantiated_handler_registry.registered_items
         handler_config = handler_config.copy()
         cls = available_handlers.registered_items[handler_config.pop('type')]
-        if (handler_name in registered and
-                (registered[handler_name] == handler_config)):
-            continue
-        else:
-            instantiated_handler_registry.unregister_item(handler_name)
+        instantiated_handler_registry.unregister_item(handler_name)
         instance = cls(**handler_config)
         instantiated_handler_registry.register_item(handler_name, instance)
 
-- 
cgit v1.2.3


From 95bfe5d5150e2bf0a26dd1b97578c4fd04152365 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 7 Aug 2015 14:44:00 -0500
Subject: add doc, remove some debug / print statements.

---
 bin/cloud-init                          |  1 -
 cloudinit/reporting/__init__.py         |  1 -
 cloudinit/reporting/handlers.py         | 16 ++++++++++++++--
 doc/examples/cloud-config-reporting.txt | 17 +++++++++++++++++
 4 files changed, 31 insertions(+), 4 deletions(-)
 create mode 100644 doc/examples/cloud-config-reporting.txt

(limited to 'cloudinit')

diff --git a/bin/cloud-init b/bin/cloud-init
index 86780408..1f64461e 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -648,7 +648,6 @@ def main():
                         "running single module %s" % args.name)
         report_on = args.report
 
-    reporting.update_configuration({'print': {'type': 'print'}})
     args.reporter = reporting.ReportEventStack(
         rname, rdesc, reporting_enabled=report_on)
     with args.reporter:
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index a3b8332f..e23fab32 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -18,7 +18,6 @@ START_EVENT_TYPE = 'start'
 
 DEFAULT_CONFIG = {
     'logging': {'type': 'log'},
-    'print': {'type': 'print'},
 }
 
 
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 9cf8bd2b..1343311f 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -28,17 +28,29 @@ class ReportingHandler(object):
 class LogHandler(ReportingHandler):
     """Publishes events to the cloud-init log at the ``INFO`` log level."""
 
+    def __init__(self, level="DEBUG"):
+        super(LogHandler, self).__init__()
+        if isinstance(level, int):
+            pass
+        else:
+            input_level = level
+            try:
+                level = gettattr(logging, level.upper())
+            except:
+                LOG.warn("invalid level '%s', using WARN", input_level)
+                level = logging.WARN
+        self.level = level
+
     def publish_event(self, event):
         """Publish an event to the ``INFO`` log level."""
         logger = logging.getLogger(
             '.'.join(['cloudinit', 'reporting', event.event_type, event.name]))
-        logger.info(event.as_string())
+        logger.log(self.level, event.as_string())
 
 
 class PrintHandler(ReportingHandler):
     def publish_event(self, event):
         """Publish an event to the ``INFO`` log level."""
-        print(event.as_string())
 
 
 class WebHookHandler(ReportingHandler):
diff --git a/doc/examples/cloud-config-reporting.txt b/doc/examples/cloud-config-reporting.txt
new file mode 100644
index 00000000..ee00078f
--- /dev/null
+++ b/doc/examples/cloud-config-reporting.txt
@@ -0,0 +1,17 @@
+#cloud-config
+##
+## The following sets up 2 reporting end points.
+## A 'webhook' and a 'log' type.
+## It also disables the built in default 'log'
+reporting:
+   smtest:
+     type: webhook
+     endpoint: "http://myhost:8000/"
+     consumer_key: "ckey_foo"
+     consumer_secret: "csecret_foo"
+     token_key: "tkey_foo"
+     token_secret: "tkey_foo"
+   smlogger:
+     type: log
+     level: WARN
+   log: null
-- 
cgit v1.2.3


From b39070772aba62d68fea14603b8d657bbb529d5e Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 7 Aug 2015 16:06:36 -0500
Subject: reporting: fix logging reporter and tests

---
 cloudinit/reporting/handlers.py   | 2 +-
 tests/unittests/test_reporting.py | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 1343311f..172679cc 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -35,7 +35,7 @@ class LogHandler(ReportingHandler):
         else:
             input_level = level
             try:
-                level = gettattr(logging, level.upper())
+                level = getattr(logging, level.upper())
             except:
                 LOG.warn("invalid level '%s', using WARN", input_level)
                 level = logging.WARN
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index 1a4ee8c4..66d4e87e 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -137,14 +137,14 @@ class TestLogHandler(TestCase):
     def test_single_log_message_at_info_published(self, getLogger):
         event = reporting.ReportingEvent('type', 'name', 'description')
         reporting.handlers.LogHandler().publish_event(event)
-        self.assertEqual(1, getLogger.return_value.info.call_count)
+        self.assertEqual(1, getLogger.return_value.log.call_count)
 
     @mock.patch.object(reporting.handlers.logging, 'getLogger')
     def test_log_message_uses_event_as_string(self, getLogger):
         event = reporting.ReportingEvent('type', 'name', 'description')
-        reporting.handlers.LogHandler().publish_event(event)
+        reporting.handlers.LogHandler(level="INFO").publish_event(event)
         self.assertIn(event.as_string(),
-                      getLogger.return_value.info.call_args[0][0])
+                      getLogger.return_value.log.call_args[0][1])
 
 
 class TestDefaultRegisteredHandler(TestCase):
-- 
cgit v1.2.3


From a9c1e3f747ae69401ebfca9ae64eec1c6d20ebe7 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 10 Aug 2015 11:17:28 -0400
Subject: reporting: remove unused variable, actually print in PrintHandler

---
 cloudinit/reporting/__init__.py | 1 -
 cloudinit/reporting/handlers.py | 1 +
 2 files changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index e23fab32..502af95c 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -83,7 +83,6 @@ def update_configuration(config):
             instantiated_handler_registry.unregister_item(
                 handler_name, force=True)
             continue
-        registered = instantiated_handler_registry.registered_items
         handler_config = handler_config.copy()
         cls = available_handlers.registered_items[handler_config.pop('type')]
         instantiated_handler_registry.unregister_item(handler_name)
diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 172679cc..5ed3cb84 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -23,6 +23,7 @@ class ReportingHandler(object):
     @abc.abstractmethod
     def publish_event(self, event):
         """Publish an event to the ``INFO`` log level."""
+        print(event.as_string())
 
 
 class LogHandler(ReportingHandler):
-- 
cgit v1.2.3


From 827b7b903abc07d5fb04591bbae5587e6dc44993 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Wed, 12 Aug 2015 12:51:39 -0400
Subject: swap: create swap with fallocate if possible

fallocate is much faster than 'dd' for creating and initializing a
swap file.

LP: #1482994
---
 cloudinit/config/cc_mounts.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 73b42f91..47b63dfc 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -206,7 +206,8 @@ def setup_swapfile(fname, size=None, maxsize=None):
         util.log_time(LOG.debug, msg, func=util.subp,
             args=[['sh', '-c',
                    ('rm -f "$1" && umask 0066 && '
-                    'dd if=/dev/zero "of=$1" bs=1M "count=$2" && '
+                    '{ fallocate -l "${2}M" "$1" || '
+                    '  dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
                     'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
                    'setup_swap', fname, mbsize]])
 
-- 
cgit v1.2.3


From 60a9ebaba73b2154ce841d36978e317197b66945 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 25 Aug 2015 15:03:35 -0400
Subject: MAAS: fixes to data source and OauthUrlHelper

the previous version was broken.  The vital fixes here are:
 * adding parsedate and oauth1 imports to url_helper
 * fix skew_data usage intending to use self.skew_data

Additionally:
 * reorder imports in url_helper
 * fixes to python3 -m cloudinit.sources.DataSourceMAAS

LP: #1488507
---
 cloudinit/sources/DataSourceMAAS.py | 25 +++++++++++++------------
 cloudinit/url_helper.py             | 14 +++++++-------
 2 files changed, 20 insertions(+), 19 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 2f36bbe2..6c95c218 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -20,14 +20,10 @@
 
 from __future__ import print_function
 
-from email.utils import parsedate
 import errno
-import oauthlib.oauth1 as oauth1
 import os
 import time
 
-from six.moves.urllib_request import Request, urlopen
-
 from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import url_helper
@@ -62,7 +58,7 @@ class DataSourceMAAS(sources.DataSource):
             if required not in mcfg:
                 return url_helper.OauthUrlHelper()
 
-        return url_helper.OauthHelper(
+        return url_helper.OauthUrlHelper(
             consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'],
             token_secret=mcfg['token_secret'],
             consumer_secret=mcfg.get('consumer_secret'))
@@ -98,7 +94,7 @@ class DataSourceMAAS(sources.DataSource):
             self.base_url = url
 
             (userdata, metadata) = read_maas_seed_url(
-                self.base_url, self.oauth_helper.md_headers,
+                self.base_url, read_file_or_url=self.oauth_helper.readurl,
                 paths=self.paths)
             self.userdata_raw = userdata
             self.metadata = metadata
@@ -312,25 +308,30 @@ if __name__ == "__main__":
         def geturl(url):
             return oauth_helper.readurl(url).contents
 
-        def printurl(url, headers_cb):
-            print("== %s ==\n%s\n" % (url, geturl(url)))
+        def printurl(url):
+            print("== %s ==\n%s\n" % (url, geturl(url).decode()))
 
         def crawl(url):
             if url.endswith("/"):
-                for line in geturl(url).splitlines():
+                for line in geturl(url).decode().splitlines():
                     if line.endswith("/"):
                         crawl("%s%s" % (url, line))
+                    elif line == "meta-data":
+                        # meta-data is a dir, it *should* end in a /
+                        crawl("%s%s" % (url, "meta-data/"))
                     else:
                         printurl("%s%s" % (url, line))
             else:
                 printurl(url)
 
         if args.subcmd == "check-seed":
+            readurl = oauth_helper.readurl
+            if args.url[0] == "/" or args.url.startswith("file://"):
+                readurl = None
             (userdata, metadata) = read_maas_seed_url(
-                args.url, read_file_or_url=oauth_helper.read_file_or_url,
-                version=args.apiver)
+                args.url, version=args.apiver, read_file_or_url=readurl)
             print("=== userdata ===")
-            print(userdata)
+            print(userdata.decode())
             print("=== metadata ===")
             pprint.pprint(metadata)
 
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index dca4cc85..ce6b5444 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -20,16 +20,16 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-import time
-
+import json
+import os
+import requests
 import six
+import time
 
-import requests
-import oauthlib.oauth1 as oauth1
-import os
-import json
+from email.utils import parsedate
 from functools import partial
 from requests import exceptions
+import oauthlib.oauth1 as oauth1
 
 from six.moves.urllib.parse import (
     urlparse, urlunparse,
@@ -434,7 +434,7 @@ class OauthUrlHelper(object):
         if abs(old_skew - skew) > self.skew_change_limit:
             self.update_skew_file(host, skew)
             LOG.warn("Setting oauth clockskew for %s to %d", host, skew)
-        skew_data[host] = skew
+        self.skew_data[host] = skew
 
         return
 
-- 
cgit v1.2.3


From 6010d3c8b903d7dae8b0ff11ec45c6f78ea50cc8 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 31 Aug 2015 12:28:09 -0400
Subject: readurl: if headers are provided still provide base headers

we want cloud-init user agent to be present even if the user
provided some headers. In the event that they provided User-Agent,
this will respect their wishes.
---
 cloudinit/url_helper.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index ce6b5444..a93847ce 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -211,10 +211,14 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
     manual_tries = 1
     if retries:
         manual_tries = max(int(retries) + 1, 1)
-    if not headers:
-        headers = {
-            'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
-        }
+
+    def_headers = {
+        'User-Agent': 'Cloud-Init/%s' % (version.version_string()),
+    }
+    if headers:
+        def_headers.update(headers)
+    headers = def_headers
+
     if not headers_cb:
         def _cb(url):
             return headers
-- 
cgit v1.2.3


From 50bcb0f77d29a76a03946c6da13b15be25257402 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 31 Aug 2015 13:33:30 -0400
Subject: split 'events' portion of reporting into separate file

this just separates events from other things that could conceivably
be reported.
---
 cloudinit/cloud.py                |   4 +-
 cloudinit/reporting/__init__.py   | 203 +-----------------------------------
 cloudinit/reporting/events.py     | 210 ++++++++++++++++++++++++++++++++++++++
 cloudinit/sources/__init__.py     |   4 +-
 cloudinit/stages.py               |  14 +--
 tests/unittests/test_reporting.py | 121 +++++++++++-----------
 6 files changed, 285 insertions(+), 271 deletions(-)
 create mode 100644 cloudinit/reporting/events.py

(limited to 'cloudinit')

diff --git a/cloudinit/cloud.py b/cloudinit/cloud.py
index edee3887..3e6be203 100644
--- a/cloudinit/cloud.py
+++ b/cloudinit/cloud.py
@@ -24,7 +24,7 @@ import copy
 import os
 
 from cloudinit import log as logging
-from cloudinit import reporting
+from cloudinit.reporting import events
 
 LOG = logging.getLogger(__name__)
 
@@ -48,7 +48,7 @@ class Cloud(object):
         self._cfg = cfg
         self._runners = runners
         if reporter is None:
-            reporter = reporting.ReportEventStack(
+            reporter = events.ReportEventStack(
                 name="unnamed-cloud-reporter",
                 description="unnamed-cloud-reporter",
                 reporting_enabled=False)
diff --git a/cloudinit/reporting/__init__.py b/cloudinit/reporting/__init__.py
index 502af95c..6b41ae61 100644
--- a/cloudinit/reporting/__init__.py
+++ b/cloudinit/reporting/__init__.py
@@ -1,7 +1,6 @@
 # Copyright 2015 Canonical Ltd.
 # This file is part of cloud-init.  See LICENCE file for license information.
 #
-# vi: ts=4 expandtab
 """
 cloud-init reporting framework
 
@@ -10,66 +9,13 @@ report events in a structured manner.
 """
 
 from ..registry import DictRegistry
-from ..reporting.handlers import available_handlers
-
-
-FINISH_EVENT_TYPE = 'finish'
-START_EVENT_TYPE = 'start'
+from .handlers import available_handlers
 
 DEFAULT_CONFIG = {
     'logging': {'type': 'log'},
 }
 
 
-class _nameset(set):
-    def __getattr__(self, name):
-        if name in self:
-            return name
-        raise AttributeError("%s not a valid value" % name)
-
-
-status = _nameset(("SUCCESS", "WARN", "FAIL"))
-
-
-class ReportingEvent(object):
-    """Encapsulation of event formatting."""
-
-    def __init__(self, event_type, name, description):
-        self.event_type = event_type
-        self.name = name
-        self.description = description
-
-    def as_string(self):
-        """The event represented as a string."""
-        return '{0}: {1}: {2}'.format(
-            self.event_type, self.name, self.description)
-
-    def as_dict(self):
-        """The event represented as a dictionary."""
-        return {'name': self.name, 'description': self.description,
-                'event_type': self.event_type}
-
-
-class FinishReportingEvent(ReportingEvent):
-
-    def __init__(self, name, description, result=status.SUCCESS):
-        super(FinishReportingEvent, self).__init__(
-            FINISH_EVENT_TYPE, name, description)
-        self.result = result
-        if result not in status:
-            raise ValueError("Invalid result: %s" % result)
-
-    def as_string(self):
-        return '{0}: {1}: {2}: {3}'.format(
-            self.event_type, self.name, self.result, self.description)
-
-    def as_dict(self):
-        """The event represented as json friendly."""
-        data = super(FinishReportingEvent, self).as_dict()
-        data['result'] = self.result
-        return data
-
-
 def update_configuration(config):
     """Update the instanciated_handler_registry.
 
@@ -90,150 +36,7 @@ def update_configuration(config):
         instantiated_handler_registry.register_item(handler_name, instance)
 
 
-def report_event(event):
-    """Report an event to all registered event handlers.
-
-    This should generally be called via one of the other functions in
-    the reporting module.
-
-    :param event_type:
-        The type of the event; this should be a constant from the
-        reporting module.
-    """
-    for _, handler in instantiated_handler_registry.registered_items.items():
-        handler.publish_event(event)
-
-
-def report_finish_event(event_name, event_description,
-                        result=status.SUCCESS):
-    """Report a "finish" event.
-
-    See :py:func:`.report_event` for parameter details.
-    """
-    event = FinishReportingEvent(event_name, event_description, result)
-    return report_event(event)
-
-
-def report_start_event(event_name, event_description):
-    """Report a "start" event.
-
-    :param event_name:
-        The name of the event; this should be a topic which events would
-        share (e.g. it will be the same for start and finish events).
-
-    :param event_description:
-        A human-readable description of the event that has occurred.
-    """
-    event = ReportingEvent(START_EVENT_TYPE, event_name, event_description)
-    return report_event(event)
-
-
-class ReportEventStack(object):
-    """Context Manager for using :py:func:`report_event`
-
-    This enables calling :py:func:`report_start_event` and
-    :py:func:`report_finish_event` through a context manager.
-
-    :param name:
-        the name of the event
-
-    :param description:
-        the event's description, passed on to :py:func:`report_start_event`
-
-    :param message:
-        the description to use for the finish event. defaults to
-        :param:description.
-
-    :param parent:
-    :type parent: :py:class:ReportEventStack or None
-        The parent of this event.  The parent is populated with
-        results of all its children.  The name used in reporting
-        is <parent.name>/<name>
-
-    :param reporting_enabled:
-        Indicates if reporting events should be generated.
-        If not provided, defaults to the parent's value, or True if no parent
-        is provided.
-
-    :param result_on_exception:
-        The result value to set if an exception is caught. default
-        value is FAIL.
-    """
-    def __init__(self, name, description, message=None, parent=None,
-                 reporting_enabled=None, result_on_exception=status.FAIL):
-        self.parent = parent
-        self.name = name
-        self.description = description
-        self.message = message
-        self.result_on_exception = result_on_exception
-        self.result = status.SUCCESS
-
-        # use parents reporting value if not provided
-        if reporting_enabled is None:
-            if parent:
-                reporting_enabled = parent.reporting_enabled
-            else:
-                reporting_enabled = True
-        self.reporting_enabled = reporting_enabled
-
-        if parent:
-            self.fullname = '/'.join((parent.fullname, name,))
-        else:
-            self.fullname = self.name
-        self.children = {}
-
-    def __repr__(self):
-        return ("ReportEventStack(%s, %s, reporting_enabled=%s)" %
-                (self.name, self.description, self.reporting_enabled))
-
-    def __enter__(self):
-        self.result = status.SUCCESS
-        if self.reporting_enabled:
-            report_start_event(self.fullname, self.description)
-        if self.parent:
-            self.parent.children[self.name] = (None, None)
-        return self
-
-    def _childrens_finish_info(self):
-        for cand_result in (status.FAIL, status.WARN):
-            for name, (value, msg) in self.children.items():
-                if value == cand_result:
-                    return (value, self.message)
-        return (self.result, self.message)
-
-    @property
-    def result(self):
-        return self._result
-
-    @result.setter
-    def result(self, value):
-        if value not in status:
-            raise ValueError("'%s' not a valid result" % value)
-        self._result = value
-
-    @property
-    def message(self):
-        if self._message is not None:
-            return self._message
-        return self.description
-
-    @message.setter
-    def message(self, value):
-        self._message = value
-
-    def _finish_info(self, exc):
-        # return tuple of description, and value
-        if exc:
-            return (self.result_on_exception, self.message)
-        return self._childrens_finish_info()
-
-    def __exit__(self, exc_type, exc_value, traceback):
-        (result, msg) = self._finish_info(exc_value)
-        if self.parent:
-            self.parent.children[self.name] = (result, msg)
-        if self.reporting_enabled:
-            report_finish_event(self.fullname, msg, result)
-
-
 instantiated_handler_registry = DictRegistry()
 update_configuration(DEFAULT_CONFIG)
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
new file mode 100644
index 00000000..e35e41dd
--- /dev/null
+++ b/cloudinit/reporting/events.py
@@ -0,0 +1,210 @@
+# Copyright 2015 Canonical Ltd.
+# This file is part of cloud-init.  See LICENCE file for license information.
+#
+"""
+cloud-init events
+
+Report events in a structured manner.
+The events here are most likely used via reporting.
+"""
+
+from . import instantiated_handler_registry
+
+FINISH_EVENT_TYPE = 'finish'
+START_EVENT_TYPE = 'start'
+
+
+class _nameset(set):
+    def __getattr__(self, name):
+        if name in self:
+            return name
+        raise AttributeError("%s not a valid value" % name)
+
+
+status = _nameset(("SUCCESS", "WARN", "FAIL"))
+
+
+class ReportingEvent(object):
+    """Encapsulation of event formatting."""
+
+    def __init__(self, event_type, name, description):
+        self.event_type = event_type
+        self.name = name
+        self.description = description
+
+    def as_string(self):
+        """The event represented as a string."""
+        return '{0}: {1}: {2}'.format(
+            self.event_type, self.name, self.description)
+
+    def as_dict(self):
+        """The event represented as a dictionary."""
+        return {'name': self.name, 'description': self.description,
+                'event_type': self.event_type}
+
+
+class FinishReportingEvent(ReportingEvent):
+
+    def __init__(self, name, description, result=status.SUCCESS):
+        super(FinishReportingEvent, self).__init__(
+            FINISH_EVENT_TYPE, name, description)
+        self.result = result
+        if result not in status:
+            raise ValueError("Invalid result: %s" % result)
+
+    def as_string(self):
+        return '{0}: {1}: {2}: {3}'.format(
+            self.event_type, self.name, self.result, self.description)
+
+    def as_dict(self):
+        """The event represented as json friendly."""
+        data = super(FinishReportingEvent, self).as_dict()
+        data['result'] = self.result
+        return data
+
+
+def report_event(event):
+    """Report an event to all registered event handlers.
+
+    This should generally be called via one of the other functions in
+    the reporting module.
+
+    :param event_type:
+        The type of the event; this should be a constant from the
+        reporting module.
+    """
+    for _, handler in instantiated_handler_registry.registered_items.items():
+        handler.publish_event(event)
+
+
+def report_finish_event(event_name, event_description,
+                        result=status.SUCCESS):
+    """Report a "finish" event.
+
+    See :py:func:`.report_event` for parameter details.
+    """
+    event = FinishReportingEvent(event_name, event_description, result)
+    return report_event(event)
+
+
+def report_start_event(event_name, event_description):
+    """Report a "start" event.
+
+    :param event_name:
+        The name of the event; this should be a topic which events would
+        share (e.g. it will be the same for start and finish events).
+
+    :param event_description:
+        A human-readable description of the event that has occurred.
+    """
+    event = ReportingEvent(START_EVENT_TYPE, event_name, event_description)
+    return report_event(event)
+
+
+class ReportEventStack(object):
+    """Context Manager for using :py:func:`report_event`
+
+    This enables calling :py:func:`report_start_event` and
+    :py:func:`report_finish_event` through a context manager.
+
+    :param name:
+        the name of the event
+
+    :param description:
+        the event's description, passed on to :py:func:`report_start_event`
+
+    :param message:
+        the description to use for the finish event. defaults to
+        :param:description.
+
+    :param parent:
+    :type parent: :py:class:ReportEventStack or None
+        The parent of this event.  The parent is populated with
+        results of all its children.  The name used in reporting
+        is <parent.name>/<name>
+
+    :param reporting_enabled:
+        Indicates if reporting events should be generated.
+        If not provided, defaults to the parent's value, or True if no parent
+        is provided.
+
+    :param result_on_exception:
+        The result value to set if an exception is caught. default
+        value is FAIL.
+    """
+    def __init__(self, name, description, message=None, parent=None,
+                 reporting_enabled=None, result_on_exception=status.FAIL):
+        self.parent = parent
+        self.name = name
+        self.description = description
+        self.message = message
+        self.result_on_exception = result_on_exception
+        self.result = status.SUCCESS
+
+        # use parents reporting value if not provided
+        if reporting_enabled is None:
+            if parent:
+                reporting_enabled = parent.reporting_enabled
+            else:
+                reporting_enabled = True
+        self.reporting_enabled = reporting_enabled
+
+        if parent:
+            self.fullname = '/'.join((parent.fullname, name,))
+        else:
+            self.fullname = self.name
+        self.children = {}
+
+    def __repr__(self):
+        return ("ReportEventStack(%s, %s, reporting_enabled=%s)" %
+                (self.name, self.description, self.reporting_enabled))
+
+    def __enter__(self):
+        self.result = status.SUCCESS
+        if self.reporting_enabled:
+            report_start_event(self.fullname, self.description)
+        if self.parent:
+            self.parent.children[self.name] = (None, None)
+        return self
+
+    def _childrens_finish_info(self):
+        for cand_result in (status.FAIL, status.WARN):
+            for name, (value, msg) in self.children.items():
+                if value == cand_result:
+                    return (value, self.message)
+        return (self.result, self.message)
+
+    @property
+    def result(self):
+        return self._result
+
+    @result.setter
+    def result(self, value):
+        if value not in status:
+            raise ValueError("'%s' not a valid result" % value)
+        self._result = value
+
+    @property
+    def message(self):
+        if self._message is not None:
+            return self._message
+        return self.description
+
+    @message.setter
+    def message(self, value):
+        self._message = value
+
+    def _finish_info(self, exc):
+        # return tuple of description, and value
+        if exc:
+            return (self.result_on_exception, self.message)
+        return self._childrens_finish_info()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        (result, msg) = self._finish_info(exc_value)
+        if self.parent:
+            self.parent.children[self.name] = (result, msg)
+        if self.reporting_enabled:
+            report_finish_event(self.fullname, msg, result)
+
+# vi: ts=4 expandtab
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 838cd198..d3cfa560 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -27,12 +27,12 @@ import six
 
 from cloudinit import importer
 from cloudinit import log as logging
-from cloudinit import reporting
 from cloudinit import type_utils
 from cloudinit import user_data as ud
 from cloudinit import util
 
 from cloudinit.filters import launch_index
+from cloudinit.reporting import events
 
 DEP_FILESYSTEM = "FILESYSTEM"
 DEP_NETWORK = "NETWORK"
@@ -254,7 +254,7 @@ def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
     LOG.debug("Searching for %s data source in: %s", mode, ds_names)
 
     for name, cls in zip(ds_names, ds_list):
-        myrep = reporting.ReportEventStack(
+        myrep = events.ReportEventStack(
             name="search-%s" % name.replace("DataSource", ""),
             description="searching for %s data from %s" % (mode, name),
             message="no %s data found from %s" % (mode, name),
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index d300709d..9f192c8d 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -46,7 +46,7 @@ from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import type_utils
 from cloudinit import util
-from cloudinit import reporting
+from cloudinit.reporting import events
 
 LOG = logging.getLogger(__name__)
 
@@ -67,7 +67,7 @@ class Init(object):
         self.datasource = NULL_DATA_SOURCE
 
         if reporter is None:
-            reporter = reporting.ReportEventStack(
+            reporter = events.ReportEventStack(
                 name="init-reporter", description="init-desc",
                 reporting_enabled=False)
         self.reporter = reporter
@@ -242,7 +242,7 @@ class Init(object):
         if self.datasource is not NULL_DATA_SOURCE:
             return self.datasource
 
-        with reporting.ReportEventStack(
+        with events.ReportEventStack(
                 name="check-cache",
                 description="attempting to read from cache",
                 parent=self.reporter) as myrep:
@@ -509,11 +509,11 @@ class Init(object):
     def consume_data(self, frequency=PER_INSTANCE):
         # Consume the userdata first, because we need want to let the part
         # handlers run first (for merging stuff)
-        with reporting.ReportEventStack(
+        with events.ReportEventStack(
             "consume-user-data", "reading and applying user-data",
             parent=self.reporter):
                 self._consume_userdata(frequency)
-        with reporting.ReportEventStack(
+        with events.ReportEventStack(
             "consume-vendor-data", "reading and applying vendor-data",
             parent=self.reporter):
                 self._consume_vendordata(frequency)
@@ -595,7 +595,7 @@ class Modules(object):
         # Created on first use
         self._cached_cfg = None
         if reporter is None:
-            reporter = reporting.ReportEventStack(
+            reporter = events.ReportEventStack(
                 name="module-reporter", description="module-desc",
                 reporting_enabled=False)
         self.reporter = reporter
@@ -710,7 +710,7 @@ class Modules(object):
                 run_name = "config-%s" % (name)
 
                 desc = "running %s with frequency %s" % (run_name, freq)
-                myrep = reporting.ReportEventStack(
+                myrep = events.ReportEventStack(
                     name=run_name, description=desc, parent=self.reporter)
 
                 with myrep:
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index 66d4e87e..bb67ef73 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -5,6 +5,7 @@
 
 from cloudinit import reporting
 from cloudinit.reporting import handlers
+from cloudinit.reporting import events
 
 from .helpers import (mock, TestCase)
 
@@ -16,12 +17,12 @@ def _fake_registry():
 
 class TestReportStartEvent(TestCase):
 
-    @mock.patch('cloudinit.reporting.instantiated_handler_registry',
+    @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
                 new_callable=_fake_registry)
     def test_report_start_event_passes_something_with_as_string_to_handlers(
             self, instantiated_handler_registry):
         event_name, event_description = 'my_test_event', 'my description'
-        reporting.report_start_event(event_name, event_description)
+        events.report_start_event(event_name, event_description)
         expected_string_representation = ': '.join(
             ['start', event_name, event_description])
         for _, handler in (
@@ -33,9 +34,9 @@ class TestReportStartEvent(TestCase):
 
 class TestReportFinishEvent(TestCase):
 
-    def _report_finish_event(self, result=reporting.status.SUCCESS):
+    def _report_finish_event(self, result=events.status.SUCCESS):
         event_name, event_description = 'my_test_event', 'my description'
-        reporting.report_finish_event(
+        events.report_finish_event(
             event_name, event_description, result=result)
         return event_name, event_description
 
@@ -46,39 +47,39 @@ class TestReportFinishEvent(TestCase):
             event = handler.publish_event.call_args[0][0]
             self.assertEqual(expected_as_string, event.as_string())
 
-    @mock.patch('cloudinit.reporting.instantiated_handler_registry',
+    @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
                 new_callable=_fake_registry)
     def test_report_finish_event_passes_something_with_as_string_to_handlers(
             self, instantiated_handler_registry):
         event_name, event_description = self._report_finish_event()
         expected_string_representation = ': '.join(
-            ['finish', event_name, reporting.status.SUCCESS,
+            ['finish', event_name, events.status.SUCCESS,
              event_description])
         self.assertHandlersPassedObjectWithAsString(
             instantiated_handler_registry.registered_items,
             expected_string_representation)
 
-    @mock.patch('cloudinit.reporting.instantiated_handler_registry',
+    @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
                 new_callable=_fake_registry)
     def test_reporting_successful_finish_has_sensible_string_repr(
             self, instantiated_handler_registry):
         event_name, event_description = self._report_finish_event(
-            result=reporting.status.SUCCESS)
+            result=events.status.SUCCESS)
         expected_string_representation = ': '.join(
-            ['finish', event_name, reporting.status.SUCCESS,
+            ['finish', event_name, events.status.SUCCESS,
              event_description])
         self.assertHandlersPassedObjectWithAsString(
             instantiated_handler_registry.registered_items,
             expected_string_representation)
 
-    @mock.patch('cloudinit.reporting.instantiated_handler_registry',
+    @mock.patch('cloudinit.reporting.events.instantiated_handler_registry',
                 new_callable=_fake_registry)
     def test_reporting_unsuccessful_finish_has_sensible_string_repr(
             self, instantiated_handler_registry):
         event_name, event_description = self._report_finish_event(
-            result=reporting.status.FAIL)
+            result=events.status.FAIL)
         expected_string_representation = ': '.join(
-            ['finish', event_name, reporting.status.FAIL, event_description])
+            ['finish', event_name, events.status.FAIL, event_description])
         self.assertHandlersPassedObjectWithAsString(
             instantiated_handler_registry.registered_items,
             expected_string_representation)
@@ -91,14 +92,14 @@ class TestReportingEvent(TestCase):
 
     def test_as_string(self):
         event_type, name, description = 'test_type', 'test_name', 'test_desc'
-        event = reporting.ReportingEvent(event_type, name, description)
+        event = events.ReportingEvent(event_type, name, description)
         expected_string_representation = ': '.join(
             [event_type, name, description])
         self.assertEqual(expected_string_representation, event.as_string())
 
     def test_as_dict(self):
         event_type, name, desc = 'test_type', 'test_name', 'test_desc'
-        event = reporting.ReportingEvent(event_type, name, desc)
+        event = events.ReportingEvent(event_type, name, desc)
         self.assertEqual(
             {'event_type': event_type, 'name': name, 'description': desc},
             event.as_dict())
@@ -106,9 +107,9 @@ class TestReportingEvent(TestCase):
 
 class TestFinishReportingEvent(TestCase):
     def test_as_has_result(self):
-        result = reporting.status.SUCCESS
+        result = events.status.SUCCESS
         name, desc = 'test_name', 'test_desc'
-        event = reporting.FinishReportingEvent(name, desc, result)
+        event = events.FinishReportingEvent(name, desc, result)
         ret = event.as_dict()
         self.assertTrue('result' in ret)
         self.assertEqual(ret['result'], result)
@@ -126,7 +127,7 @@ class TestLogHandler(TestCase):
     @mock.patch.object(reporting.handlers.logging, 'getLogger')
     def test_appropriate_logger_used(self, getLogger):
         event_type, event_name = 'test_type', 'test_name'
-        event = reporting.ReportingEvent(event_type, event_name, 'description')
+        event = events.ReportingEvent(event_type, event_name, 'description')
         reporting.handlers.LogHandler().publish_event(event)
         self.assertEqual(
             [mock.call(
@@ -135,13 +136,13 @@ class TestLogHandler(TestCase):
 
     @mock.patch.object(reporting.handlers.logging, 'getLogger')
     def test_single_log_message_at_info_published(self, getLogger):
-        event = reporting.ReportingEvent('type', 'name', 'description')
+        event = events.ReportingEvent('type', 'name', 'description')
         reporting.handlers.LogHandler().publish_event(event)
         self.assertEqual(1, getLogger.return_value.log.call_count)
 
     @mock.patch.object(reporting.handlers.logging, 'getLogger')
     def test_log_message_uses_event_as_string(self, getLogger):
-        event = reporting.ReportingEvent('type', 'name', 'description')
+        event = events.ReportingEvent('type', 'name', 'description')
         reporting.handlers.LogHandler(level="INFO").publish_event(event)
         self.assertIn(event.as_string(),
                       getLogger.return_value.log.call_args[0][1])
@@ -232,49 +233,49 @@ class TestReportingConfiguration(TestCase):
 
 
 class TestReportingEventStack(TestCase):
-    @mock.patch('cloudinit.reporting.report_finish_event')
-    @mock.patch('cloudinit.reporting.report_start_event')
+    @mock.patch('cloudinit.reporting.events.report_finish_event')
+    @mock.patch('cloudinit.reporting.events.report_start_event')
     def test_start_and_finish_success(self, report_start, report_finish):
-        with reporting.ReportEventStack(name="myname", description="mydesc"):
+        with events.ReportEventStack(name="myname", description="mydesc"):
             pass
         self.assertEqual(
             [mock.call('myname', 'mydesc')], report_start.call_args_list)
         self.assertEqual(
-            [mock.call('myname', 'mydesc', reporting.status.SUCCESS)],
+            [mock.call('myname', 'mydesc', events.status.SUCCESS)],
             report_finish.call_args_list)
 
-    @mock.patch('cloudinit.reporting.report_finish_event')
-    @mock.patch('cloudinit.reporting.report_start_event')
+    @mock.patch('cloudinit.reporting.events.report_finish_event')
+    @mock.patch('cloudinit.reporting.events.report_start_event')
     def test_finish_exception_defaults_fail(self, report_start, report_finish):
         name = "myname"
         desc = "mydesc"
         try:
-            with reporting.ReportEventStack(name, description=desc):
+            with events.ReportEventStack(name, description=desc):
                 raise ValueError("This didnt work")
         except ValueError:
             pass
         self.assertEqual([mock.call(name, desc)], report_start.call_args_list)
         self.assertEqual(
-            [mock.call(name, desc, reporting.status.FAIL)],
+            [mock.call(name, desc, events.status.FAIL)],
             report_finish.call_args_list)
 
-    @mock.patch('cloudinit.reporting.report_finish_event')
-    @mock.patch('cloudinit.reporting.report_start_event')
+    @mock.patch('cloudinit.reporting.events.report_finish_event')
+    @mock.patch('cloudinit.reporting.events.report_start_event')
     def test_result_on_exception_used(self, report_start, report_finish):
         name = "myname"
         desc = "mydesc"
         try:
-            with reporting.ReportEventStack(
-                    name, desc, result_on_exception=reporting.status.WARN):
+            with events.ReportEventStack(
+                    name, desc, result_on_exception=events.status.WARN):
                 raise ValueError("This didnt work")
         except ValueError:
             pass
         self.assertEqual([mock.call(name, desc)], report_start.call_args_list)
         self.assertEqual(
-            [mock.call(name, desc, reporting.status.WARN)],
+            [mock.call(name, desc, events.status.WARN)],
             report_finish.call_args_list)
 
-    @mock.patch('cloudinit.reporting.report_start_event')
+    @mock.patch('cloudinit.reporting.events.report_start_event')
     def test_child_fullname_respects_parent(self, report_start):
         parent_name = "topname"
         c1_name = "c1name"
@@ -282,59 +283,59 @@ class TestReportingEventStack(TestCase):
         c2_expected_fullname = '/'.join([parent_name, c1_name, c2_name])
         c1_expected_fullname = '/'.join([parent_name, c1_name])
 
-        parent = reporting.ReportEventStack(parent_name, "topdesc")
-        c1 = reporting.ReportEventStack(c1_name, "c1desc", parent=parent)
-        c2 = reporting.ReportEventStack(c2_name, "c2desc", parent=c1)
+        parent = events.ReportEventStack(parent_name, "topdesc")
+        c1 = events.ReportEventStack(c1_name, "c1desc", parent=parent)
+        c2 = events.ReportEventStack(c2_name, "c2desc", parent=c1)
         with c1:
             report_start.assert_called_with(c1_expected_fullname, "c1desc")
             with c2:
                 report_start.assert_called_with(c2_expected_fullname, "c2desc")
 
-    @mock.patch('cloudinit.reporting.report_finish_event')
-    @mock.patch('cloudinit.reporting.report_start_event')
+    @mock.patch('cloudinit.reporting.events.report_finish_event')
+    @mock.patch('cloudinit.reporting.events.report_start_event')
     def test_child_result_bubbles_up(self, report_start, report_finish):
-        parent = reporting.ReportEventStack("topname", "topdesc")
-        child = reporting.ReportEventStack("c_name", "c_desc", parent=parent)
+        parent = events.ReportEventStack("topname", "topdesc")
+        child = events.ReportEventStack("c_name", "c_desc", parent=parent)
         with parent:
             with child:
-                child.result = reporting.status.WARN
+                child.result = events.status.WARN
 
         report_finish.assert_called_with(
-            "topname", "topdesc", reporting.status.WARN)
+            "topname", "topdesc", events.status.WARN)
 
-    @mock.patch('cloudinit.reporting.report_finish_event')
+    @mock.patch('cloudinit.reporting.events.report_finish_event')
     def test_message_used_in_finish(self, report_finish):
-        with reporting.ReportEventStack("myname", "mydesc",
-                                        message="mymessage"):
+        with events.ReportEventStack("myname", "mydesc",
+                                     message="mymessage"):
             pass
         self.assertEqual(
-            [mock.call("myname", "mymessage", reporting.status.SUCCESS)],
+            [mock.call("myname", "mymessage", events.status.SUCCESS)],
             report_finish.call_args_list)
 
-    @mock.patch('cloudinit.reporting.report_finish_event')
+    @mock.patch('cloudinit.reporting.events.report_finish_event')
     def test_message_updatable(self, report_finish):
-        with reporting.ReportEventStack("myname", "mydesc") as c:
+        with events.ReportEventStack("myname", "mydesc") as c:
             c.message = "all good"
         self.assertEqual(
-            [mock.call("myname", "all good", reporting.status.SUCCESS)],
+            [mock.call("myname", "all good", events.status.SUCCESS)],
             report_finish.call_args_list)
 
-    @mock.patch('cloudinit.reporting.report_start_event')
-    @mock.patch('cloudinit.reporting.report_finish_event')
+    @mock.patch('cloudinit.reporting.events.report_start_event')
+    @mock.patch('cloudinit.reporting.events.report_finish_event')
     def test_reporting_disabled_does_not_report_events(
             self, report_start, report_finish):
-        with reporting.ReportEventStack("a", "b", reporting_enabled=False):
+        with events.ReportEventStack("a", "b", reporting_enabled=False):
             pass
         self.assertEqual(report_start.call_count, 0)
         self.assertEqual(report_finish.call_count, 0)
 
-    @mock.patch('cloudinit.reporting.report_start_event')
-    @mock.patch('cloudinit.reporting.report_finish_event')
+    @mock.patch('cloudinit.reporting.events.report_start_event')
+    @mock.patch('cloudinit.reporting.events.report_finish_event')
     def test_reporting_child_default_to_parent(
             self, report_start, report_finish):
-        parent = reporting.ReportEventStack(
+        parent = events.ReportEventStack(
             "pname", "pdesc", reporting_enabled=False)
-        child = reporting.ReportEventStack("cname", "cdesc", parent=parent)
+        child = events.ReportEventStack("cname", "cdesc", parent=parent)
         with parent:
             with child:
                 pass
@@ -343,17 +344,17 @@ class TestReportingEventStack(TestCase):
         self.assertEqual(report_finish.call_count, 0)
 
     def test_reporting_event_has_sane_repr(self):
-        myrep = reporting.ReportEventStack("fooname", "foodesc",
-                                           reporting_enabled=True).__repr__()
+        myrep = events.ReportEventStack("fooname", "foodesc",
+                                        reporting_enabled=True).__repr__()
         self.assertIn("fooname", myrep)
         self.assertIn("foodesc", myrep)
         self.assertIn("True", myrep)
 
     def test_set_invalid_result_raises_value_error(self):
-        f = reporting.ReportEventStack("myname", "mydesc")
+        f = events.ReportEventStack("myname", "mydesc")
         self.assertRaises(ValueError, setattr, f, "result", "BOGUS")
 
 
 class TestStatusAccess(TestCase):
     def test_invalid_status_access_raises_value_error(self):
-        self.assertRaises(AttributeError, getattr, reporting.status, "BOGUS")
+        self.assertRaises(AttributeError, getattr, events.status, "BOGUS")
-- 
cgit v1.2.3


From 7820a43baf94e11ce458476a442edd726a406aba Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 31 Aug 2015 13:57:05 -0400
Subject: events: add timestamp and origin, support file posting

This adds 'timestamp' and 'origin' to events.
The timestamp is simply that, a floating point timestamp of when
the event occurred.

The origin indicates the source / reporter of this.  It is useful
to have a single endpoint with multiple different things reporting
to it.  For example, MAAS will configure cloud-init and curtin
to report to the same endpoint and then it can differentiate who
made the post.  Admittedly, they could use multiple endpoints, but
this seems sane.

Also, add support for posting files at the close of an event.
This is utilized in curtin to post a log file when the install is
done.  Files are posted on success or fail of the event.
---
 cloudinit/reporting/events.py     | 58 +++++++++++++++++++++++++++++++--------
 tests/unittests/test_reporting.py | 15 ++++++----
 2 files changed, 56 insertions(+), 17 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/reporting/events.py b/cloudinit/reporting/events.py
index e35e41dd..2f767f64 100644
--- a/cloudinit/reporting/events.py
+++ b/cloudinit/reporting/events.py
@@ -2,17 +2,22 @@
 # This file is part of cloud-init.  See LICENCE file for license information.
 #
 """
-cloud-init events
+events for reporting.
 
-Report events in a structured manner.
-The events here are most likely used via reporting.
+The events here are designed to be used with reporting.
+They can be published to registered handlers with report_event.
 """
+import base64
+import os.path
+import time
 
 from . import instantiated_handler_registry
 
 FINISH_EVENT_TYPE = 'finish'
 START_EVENT_TYPE = 'start'
 
+DEFAULT_EVENT_ORIGIN = 'cloudinit'
+
 
 class _nameset(set):
     def __getattr__(self, name):
@@ -27,10 +32,13 @@ status = _nameset(("SUCCESS", "WARN", "FAIL"))
 class ReportingEvent(object):
     """Encapsulation of event formatting."""
 
-    def __init__(self, event_type, name, description):
+    def __init__(self, event_type, name, description,
+                 origin=DEFAULT_EVENT_ORIGIN, timestamp=time.time()):
         self.event_type = event_type
         self.name = name
         self.description = description
+        self.origin = origin
+        self.timestamp = timestamp
 
     def as_string(self):
         """The event represented as a string."""
@@ -40,15 +48,20 @@ class ReportingEvent(object):
     def as_dict(self):
         """The event represented as a dictionary."""
         return {'name': self.name, 'description': self.description,
-                'event_type': self.event_type}
+                'event_type': self.event_type, 'origin': self.origin,
+                'timestamp': self.timestamp}
 
 
 class FinishReportingEvent(ReportingEvent):
 
-    def __init__(self, name, description, result=status.SUCCESS):
+    def __init__(self, name, description, result=status.SUCCESS,
+                 post_files=None):
         super(FinishReportingEvent, self).__init__(
             FINISH_EVENT_TYPE, name, description)
         self.result = result
+        if post_files is None:
+            post_files = []
+        self.post_files = post_files
         if result not in status:
             raise ValueError("Invalid result: %s" % result)
 
@@ -60,6 +73,8 @@ class FinishReportingEvent(ReportingEvent):
         """The event represented as json friendly."""
         data = super(FinishReportingEvent, self).as_dict()
         data['result'] = self.result
+        if self.post_files:
+            data['files'] = _collect_file_info(self.post_files)
         return data
 
 
@@ -78,12 +93,13 @@ def report_event(event):
 
 
 def report_finish_event(event_name, event_description,
-                        result=status.SUCCESS):
+                        result=status.SUCCESS, post_files=None):
     """Report a "finish" event.
 
     See :py:func:`.report_event` for parameter details.
     """
-    event = FinishReportingEvent(event_name, event_description, result)
+    event = FinishReportingEvent(event_name, event_description, result,
+                                 post_files=post_files)
     return report_event(event)
 
 
@@ -133,13 +149,17 @@ class ReportEventStack(object):
         value is FAIL.
     """
     def __init__(self, name, description, message=None, parent=None,
-                 reporting_enabled=None, result_on_exception=status.FAIL):
+                 reporting_enabled=None, result_on_exception=status.FAIL,
+                 post_files=None):
         self.parent = parent
         self.name = name
         self.description = description
         self.message = message
         self.result_on_exception = result_on_exception
         self.result = status.SUCCESS
+        if post_files is None:
+            post_files = []
+        self.post_files = post_files
 
         # use parents reporting value if not provided
         if reporting_enabled is None:
@@ -205,6 +225,22 @@ class ReportEventStack(object):
         if self.parent:
             self.parent.children[self.name] = (result, msg)
         if self.reporting_enabled:
-            report_finish_event(self.fullname, msg, result)
+            report_finish_event(self.fullname, msg, result,
+                                post_files=self.post_files)
+
+
+def _collect_file_info(files):
+    if not files:
+        return None
+    ret = []
+    for fname in files:
+        if not os.path.isfile(fname):
+            content = None
+        else:
+            with open(fname, "rb") as fp:
+                content = base64.b64encode(fp.read()).decode()
+        ret.append({'path': fname, 'content': content,
+                    'encoding': 'base64'})
+    return ret
 
-# vi: ts=4 expandtab
+# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_reporting.py b/tests/unittests/test_reporting.py
index bb67ef73..0a441adf 100644
--- a/tests/unittests/test_reporting.py
+++ b/tests/unittests/test_reporting.py
@@ -241,7 +241,8 @@ class TestReportingEventStack(TestCase):
         self.assertEqual(
             [mock.call('myname', 'mydesc')], report_start.call_args_list)
         self.assertEqual(
-            [mock.call('myname', 'mydesc', events.status.SUCCESS)],
+            [mock.call('myname', 'mydesc', events.status.SUCCESS,
+                       post_files=[])],
             report_finish.call_args_list)
 
     @mock.patch('cloudinit.reporting.events.report_finish_event')
@@ -256,7 +257,7 @@ class TestReportingEventStack(TestCase):
             pass
         self.assertEqual([mock.call(name, desc)], report_start.call_args_list)
         self.assertEqual(
-            [mock.call(name, desc, events.status.FAIL)],
+            [mock.call(name, desc, events.status.FAIL, post_files=[])],
             report_finish.call_args_list)
 
     @mock.patch('cloudinit.reporting.events.report_finish_event')
@@ -272,7 +273,7 @@ class TestReportingEventStack(TestCase):
             pass
         self.assertEqual([mock.call(name, desc)], report_start.call_args_list)
         self.assertEqual(
-            [mock.call(name, desc, events.status.WARN)],
+            [mock.call(name, desc, events.status.WARN, post_files=[])],
             report_finish.call_args_list)
 
     @mock.patch('cloudinit.reporting.events.report_start_event')
@@ -301,7 +302,7 @@ class TestReportingEventStack(TestCase):
                 child.result = events.status.WARN
 
         report_finish.assert_called_with(
-            "topname", "topdesc", events.status.WARN)
+            "topname", "topdesc", events.status.WARN, post_files=[])
 
     @mock.patch('cloudinit.reporting.events.report_finish_event')
     def test_message_used_in_finish(self, report_finish):
@@ -309,7 +310,8 @@ class TestReportingEventStack(TestCase):
                                      message="mymessage"):
             pass
         self.assertEqual(
-            [mock.call("myname", "mymessage", events.status.SUCCESS)],
+            [mock.call("myname", "mymessage", events.status.SUCCESS,
+                       post_files=[])],
             report_finish.call_args_list)
 
     @mock.patch('cloudinit.reporting.events.report_finish_event')
@@ -317,7 +319,8 @@ class TestReportingEventStack(TestCase):
         with events.ReportEventStack("myname", "mydesc") as c:
             c.message = "all good"
         self.assertEqual(
-            [mock.call("myname", "all good", events.status.SUCCESS)],
+            [mock.call("myname", "all good", events.status.SUCCESS,
+                       post_files=[])],
             report_finish.call_args_list)
 
     @mock.patch('cloudinit.reporting.events.report_start_event')
-- 
cgit v1.2.3


From 8bcccd07d1dbde74126e81967388d2e5a90fcfa7 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 31 Aug 2015 14:10:54 -0400
Subject: handlers: docstring fixups, and print actually do something

---
 cloudinit/reporting/handlers.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 5ed3cb84..140a98c5 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -22,12 +22,11 @@ class ReportingHandler(object):
 
     @abc.abstractmethod
     def publish_event(self, event):
-        """Publish an event to the ``INFO`` log level."""
-        print(event.as_string())
+        """Publish an event."""
 
 
 class LogHandler(ReportingHandler):
-    """Publishes events to the cloud-init log at the ``INFO`` log level."""
+    """Publishes events to the cloud-init log at the ``DEBUG`` log level."""
 
     def __init__(self, level="DEBUG"):
         super(LogHandler, self).__init__()
@@ -43,15 +42,16 @@ class LogHandler(ReportingHandler):
         self.level = level
 
     def publish_event(self, event):
-        """Publish an event to the ``INFO`` log level."""
         logger = logging.getLogger(
             '.'.join(['cloudinit', 'reporting', event.event_type, event.name]))
         logger.log(self.level, event.as_string())
 
 
 class PrintHandler(ReportingHandler):
+    """Print the event as a string."""
+
     def publish_event(self, event):
-        """Publish an event to the ``INFO`` log level."""
+        print(event.as_string())
 
 
 class WebHookHandler(ReportingHandler):
-- 
cgit v1.2.3


From 3f2dddae6e8d5148bcf89c2b4e27975d1da77aea Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 31 Aug 2015 14:11:47 -0400
Subject: handlers: drop unused import

this import was left over from before we moved oauthlib into url_helper
---
 cloudinit/reporting/handlers.py | 1 -
 1 file changed, 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index 140a98c5..ba480da0 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -1,7 +1,6 @@
 # vi: ts=4 expandtab
 
 import abc
-import oauthlib.oauth1 as oauth1
 import six
 
 from ..registry import DictRegistry
-- 
cgit v1.2.3


From 8cece8b8b6fd12b6df554413894afbf1ae93d18f Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Tue, 1 Sep 2015 18:26:03 +0100
Subject: Handle symlink mount points in mount_cb.

The Azure data source now uses a /dev/disk symlink to identify devices,
but the dereferenced version of this appears in the mount table.
mount_cb therefore doesn't identify when a disk is already mounted, and
attempts to mount it a second time (which fails with NTFS).
---
 cloudinit/util.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 09e583f5..83c2c0d2 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -1480,8 +1480,8 @@ def mount_cb(device, callback, data=None, rw=False, mtype=None, sync=True):
     mounted = mounts()
     with tempdir() as tmpd:
         umount = False
-        if device in mounted:
-            mountpoint = mounted[device]['mountpoint']
+        if os.path.realpath(device) in mounted:
+            mountpoint = mounted[os.path.realpath(device)]['mountpoint']
         else:
             failure_reason = None
             for mtype in mtypes:
-- 
cgit v1.2.3


From 3c39c3f7638245e9581a2e1f4faae2dc2680f0c7 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 8 Sep 2015 14:26:30 -0400
Subject: NoCloud: fix consumption of vendor-data

the content of vendordata was being assigned to vendordata,
rather than vendordata_raw.  The result was that it is not processed
for includes or part handlers or other things as it is in other
datasources.

LP: #1493453
---
 ChangeLog                                       | 1 +
 cloudinit/sources/DataSourceNoCloud.py          | 2 +-
 tests/unittests/test_datasource/test_nocloud.py | 2 +-
 3 files changed, 3 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index 7869ab7e..6fb70696 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -60,6 +60,7 @@
  - rsyslog: add additional configuration mode (LP: #1478103)
  - status_wrapper in main: fix use of print_exc when handling exception
  - reporting: add reporting module for web hook or logging of events.
+ - NoCloud: fix consumption of vendordata (LP: #1493453)
 0.7.6:
  - open 0.7.6
  - Enable vendordata on CloudSigma datasource (LP: #1303986)
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index 6a861af3..4dffe6e6 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -190,7 +190,7 @@ class DataSourceNoCloud(sources.DataSource):
             self.seed = ",".join(found)
             self.metadata = mydata['meta-data']
             self.userdata_raw = mydata['user-data']
-            self.vendordata = mydata['vendor-data']
+            self.vendordata_raw = mydata['vendor-data']
             return True
 
         LOG.debug("%s: not claiming datasource, dsmode=%s", self,
diff --git a/tests/unittests/test_datasource/test_nocloud.py b/tests/unittests/test_datasource/test_nocloud.py
index 85b4c25a..2d5fc37c 100644
--- a/tests/unittests/test_datasource/test_nocloud.py
+++ b/tests/unittests/test_datasource/test_nocloud.py
@@ -121,7 +121,7 @@ class TestNoCloudDataSource(TestCase):
         ret = dsrc.get_data()
         self.assertEqual(dsrc.userdata_raw, ud)
         self.assertEqual(dsrc.metadata, md)
-        self.assertEqual(dsrc.vendordata, vd)
+        self.assertEqual(dsrc.vendordata_raw, vd)
         self.assertTrue(ret)
 
     def test_nocloud_no_vendordata(self):
-- 
cgit v1.2.3


From ba3e59cbb5ae58a2267fcbcd23eecaaa26f2c396 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 8 Sep 2015 16:53:59 -0400
Subject:   power_state: support 'condition' argument

  if 'condition' is provided to config in power_state, then
  consult it before powering off.

  This allows the user to shut down only if a condition is met, and
  leave the system in a debuggable state otherwise.

  An example is as simple as:
   power_state:
     mode: poweroff
     condition: ['sh', '-c', '[ -f /disable-poweroff ]']
---
 ChangeLog                                          |  1 +
 cloudinit/config/cc_power_state_change.py          | 57 +++++++++++++++++++---
 doc/examples/cloud-config-power-state.txt          |  9 ++++
 .../test_handler/test_handler_power_state.py       | 48 ++++++++++++++++--
 4 files changed, 105 insertions(+), 10 deletions(-)

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index 6fb70696..bbb7e990 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -61,6 +61,7 @@
  - status_wrapper in main: fix use of print_exc when handling exception
  - reporting: add reporting module for web hook or logging of events.
  - NoCloud: fix consumption of vendordata (LP: #1493453)
+ - power_state_change: support 'condition' to disable or enable poweroff
 0.7.6:
  - open 0.7.6
  - Enable vendordata on CloudSigma datasource (LP: #1303986)
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 09d37371..7d9567e3 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -22,6 +22,7 @@ from cloudinit import util
 import errno
 import os
 import re
+import six
 import subprocess
 import time
 
@@ -48,10 +49,40 @@ def givecmdline(pid):
         return None
 
 
+def check_condition(cond, log=None):
+    if isinstance(cond, bool):
+        if log:
+            log.debug("Static Condition: %s" % cond)
+        return cond
+
+    pre = "check_condition command (%s): " % cond
+    try:
+        proc = subprocess.Popen(cond, shell=not isinstance(cond, list))
+        proc.communicate()
+        ret = proc.returncode
+        if ret == 0:
+            if log:
+                log.debug(pre + "exited 0. condition met.")
+            return True
+        elif ret == 1:
+            if log:
+                log.debug(pre + "exited 1. condition not met.")
+            return False
+        else:
+            if log:
+                log.warn(pre + "unexpected exit %s. " % ret +
+                         "do not apply change.")
+            return False
+    except Exception as e:
+        if log:
+            log.warn(pre + "Unexpected error: %s" % e)
+        return False
+
+
 def handle(_name, cfg, _cloud, log, _args):
 
     try:
-        (args, timeout) = load_power_state(cfg)
+        (args, timeout, condition) = load_power_state(cfg)
         if args is None:
             log.debug("no power_state provided. doing nothing")
             return
@@ -59,6 +90,10 @@ def handle(_name, cfg, _cloud, log, _args):
         log.warn("%s Not performing power state change!" % str(e))
         return
 
+    if condition is False:
+        log.debug("Condition was false. Will not perform state change.")
+        return
+
     mypid = os.getpid()
 
     cmdline = givecmdline(mypid)
@@ -70,8 +105,8 @@ def handle(_name, cfg, _cloud, log, _args):
 
     log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args)))
 
-    util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, execmd,
-                 [args, devnull_fp])
+    util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, 
+                 condition, execmd, [args, devnull_fp])
 
 
 def load_power_state(cfg):
@@ -80,7 +115,7 @@ def load_power_state(cfg):
     pstate = cfg.get('power_state')
 
     if pstate is None:
-        return (None, None)
+        return (None, None, None)
 
     if not isinstance(pstate, dict):
         raise TypeError("power_state is not a dict.")
@@ -115,7 +150,10 @@ def load_power_state(cfg):
         raise ValueError("failed to convert timeout '%s' to float." %
                          pstate['timeout'])
 
-    return (args, timeout)
+    condition = pstate.get("condition", True)
+    if not isinstance(condition, six.string_types + (list, bool)):
+        raise TypeError("condition type %s invalid. must be list, bool, str")
+    return (args, timeout, condition)
 
 
 def doexit(sysexit):
@@ -133,7 +171,7 @@ def execmd(exe_args, output=None, data_in=None):
     doexit(ret)
 
 
-def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
+def run_after_pid_gone(pid, pidcmdline, timeout, log, condition, func, args):
     # wait until pid, with /proc/pid/cmdline contents of pidcmdline
     # is no longer alive.  After it is gone, or timeout has passed
     # execute func(args)
@@ -175,4 +213,11 @@ def run_after_pid_gone(pid, pidcmdline, timeout, log, func, args):
 
     if log:
         log.debug(msg)
+
+    try:
+        if not check_condition(condition, log):
+            return
+    except Exception as e:
+        fatal("Unexpected Exception when checking condition: %s" % e)
+
     func(*args)
diff --git a/doc/examples/cloud-config-power-state.txt b/doc/examples/cloud-config-power-state.txt
index 8df14366..b470153d 100644
--- a/doc/examples/cloud-config-power-state.txt
+++ b/doc/examples/cloud-config-power-state.txt
@@ -23,9 +23,18 @@
 # message: provided as the message argument to 'shutdown'. default is none.
 # timeout: the amount of time to give the cloud-init process to finish
 #          before executing shutdown.
+# condition: apply state change only if condition is met.
+#            May be boolean True (always met), or False (never met),
+#            or a command string or list to be executed.
+#            command's exit code indicates:
+#               0: condition met
+#               1: condition not met
+#            other exit codes will result in 'not met', but are reserved
+#            for future use.
 #
 power_state:
  delay: "+30"
  mode: poweroff
  message: Bye Bye
  timeout: 30
+ condition: True
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py
index 2f86b8f8..5687b10d 100644
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ b/tests/unittests/test_handler/test_handler_power_state.py
@@ -1,6 +1,9 @@
+import sys
+
 from cloudinit.config import cc_power_state_change as psc
 
 from .. import helpers as t_help
+from ..helpers import mock
 
 
 class TestLoadPowerState(t_help.TestCase):
@@ -9,12 +12,12 @@ class TestLoadPowerState(t_help.TestCase):
 
     def test_no_config(self):
         # completely empty config should mean do nothing
-        (cmd, _timeout) = psc.load_power_state({})
+        (cmd, _timeout, _condition) = psc.load_power_state({})
         self.assertEqual(cmd, None)
 
     def test_irrelevant_config(self):
         # no power_state field in config should return None for cmd
-        (cmd, _timeout) = psc.load_power_state({'foo': 'bar'})
+        (cmd, _timeout, _condition) = psc.load_power_state({'foo': 'bar'})
         self.assertEqual(cmd, None)
 
     def test_invalid_mode(self):
@@ -53,23 +56,60 @@ class TestLoadPowerState(t_help.TestCase):
     def test_no_message(self):
         # if message is not present, then no argument should be passed for it
         cfg = {'power_state': {'mode': 'poweroff'}}
-        (cmd, _timeout) = psc.load_power_state(cfg)
+        (cmd, _timeout, _condition) = psc.load_power_state(cfg)
         self.assertNotIn("", cmd)
         check_lps_ret(psc.load_power_state(cfg))
         self.assertTrue(len(cmd) == 3)
 
+    def test_condition_null_raises(self):
+        cfg = {'power_state': {'mode': 'poweroff', 'condition': None}}
+        self.assertRaises(TypeError, psc.load_power_state, cfg)
+
+    def test_condition_default_is_true(self):
+        cfg = {'power_state': {'mode': 'poweroff'}}
+        _cmd, _timeout, cond = psc.load_power_state(cfg)
+        self.assertEqual(cond, True)
+
+
+class TestCheckCondition(t_help.TestCase):
+    def cmd_with_exit(self, rc):
+        return([sys.executable, '-c', 'import sys; sys.exit(%s)' % rc])
+        
+    def test_true_is_true(self):
+        self.assertEqual(psc.check_condition(True), True)
+
+    def test_false_is_false(self):
+        self.assertEqual(psc.check_condition(False), False)
+
+    def test_cmd_exit_zero_true(self):
+        self.assertEqual(psc.check_condition(self.cmd_with_exit(0)), True)
+
+    def test_cmd_exit_one_false(self):
+        self.assertEqual(psc.check_condition(self.cmd_with_exit(1)), False)
+
+    def test_cmd_exit_nonzero_warns(self):
+        mocklog = mock.Mock()
+        self.assertEqual(
+            psc.check_condition(self.cmd_with_exit(2), mocklog), False)
+        self.assertEqual(mocklog.warn.call_count, 1)
+
+
 
 def check_lps_ret(psc_return, mode=None):
-    if len(psc_return) != 2:
+    if len(psc_return) != 3:
         raise TypeError("length returned = %d" % len(psc_return))
 
     errs = []
     cmd = psc_return[0]
     timeout = psc_return[1]
+    condition = psc_return[2]
 
     if 'shutdown' not in psc_return[0][0]:
         errs.append("string 'shutdown' not in cmd")
 
+    if 'condition' is None:
+        errs.append("condition was not returned")
+
     if mode is not None:
         opt = {'halt': '-H', 'poweroff': '-P', 'reboot': '-r'}[mode]
         if opt not in psc_return[0]:
-- 
cgit v1.2.3


From 6f2b8551e72596adfc685357d8471c454bd96d63 Mon Sep 17 00:00:00 2001
From: Ben Howard <ben.howard@canonical.com>
Date: Fri, 11 Sep 2015 13:38:14 -0600
Subject: Ubuntu Snappy: conditionally enable SSH on Snappy   When a user
 provides authentication tokens, enable SSH unless SSH has   been explicitly
 disabled (LP: #1494816).

---
 cloudinit/config/cc_snappy.py | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 7aaec94a..e36542bf 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -274,7 +274,20 @@ def handle(name, cfg, cloud, log, args):
             LOG.warn("'%s' failed for '%s': %s",
                      pkg_op['op'], pkg_op['name'], e)
 
-    disable_enable_ssh(mycfg.get('ssh_enabled', False))
+    # Default to disabling SSH
+    ssh_enabled = mycfg.get('ssh_enabled', False)
+
+    # If the user has not explicitly enabled or disabled SSH, then enable it
+    # when password SSH authentication is requested or there are SSH keys
+    if mycfg.get('ssh_enabled', None) is not False:
+        if len(mycfg.get('public-keys', [])) > 0:
+            LOG.debug("Enabling SSH, user SSH keys provided")
+            ssh_enabled = True
+        elif mycfg.get('ssh_pwauth', False):
+            LOG.debug("Enabling SSH, password authentication requested")
+            ssh_enabled = True
+
+    disable_enable_ssh(ssh_enabled)
 
     if fails:
         raise Exception("failed to install/configure snaps")
-- 
cgit v1.2.3


From fd6b08c4d03b07be67398450e40e7e2f91e8db51 Mon Sep 17 00:00:00 2001
From: Ben Howard <ben.howard@canonical.com>
Date: Fri, 11 Sep 2015 14:04:52 -0600
Subject: Refinements on SSH enablement

---
 cloudinit/config/cc_snappy.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index e36542bf..899df10c 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -280,10 +280,12 @@ def handle(name, cfg, cloud, log, args):
     # If the user has not explicitly enabled or disabled SSH, then enable it
     # when password SSH authentication is requested or there are SSH keys
     if mycfg.get('ssh_enabled', None) is not False:
-        if len(mycfg.get('public-keys', [])) > 0:
+        user_ssh_keys = cloud.get_public_ssh_keys() or None
+        password_auth_enabled = cfg.get('ssh_pwauth', False)
+        if user_ssh_keys:
             LOG.debug("Enabling SSH, user SSH keys provided")
             ssh_enabled = True
-        elif mycfg.get('ssh_pwauth', False):
+        elif password_auth_enabled:
             LOG.debug("Enabling SSH, password authentication requested")
             ssh_enabled = True
 
-- 
cgit v1.2.3


From 988174dca9e4e5593b357c6def82c857f718282d Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 11 Sep 2015 16:52:26 -0400
Subject: cc_snappy: update doc string, change default to 'auto'

---
 cloudinit/config/cc_snappy.py | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 899df10c..124452c0 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -6,7 +6,7 @@ Example config:
   #cloud-config
   snappy:
     system_snappy: auto
-    ssh_enabled: False
+    ssh_enabled: auto
     packages: [etcd, pkg2.smoser]
     config:
       pkgname:
@@ -16,7 +16,12 @@ Example config:
     packages_dir: '/writable/user-data/cloud-init/snaps'
 
  - ssh_enabled:
-   This defaults to 'False'.  Set to a non-false value to enable ssh service
+   This controls the system's ssh service.  The default value is 'auto'.
+     True:  enable ssh service
+     False: disable ssh service
+     auto:  enable ssh service if either ssh keys have been provided
+            or user has requested password authentication (ssh_pwauth).
+
  - snap installation and config
    The above would install 'etcd', and then install 'pkg2.smoser' with a
    '<config-file>' argument where 'config-file' has 'config-blob' inside it.
@@ -275,19 +280,23 @@ def handle(name, cfg, cloud, log, args):
                      pkg_op['op'], pkg_op['name'], e)
 
     # Default to disabling SSH
-    ssh_enabled = mycfg.get('ssh_enabled', False)
+    ssh_enabled = mycfg.get('ssh_enabled', "auto")
 
     # If the user has not explicitly enabled or disabled SSH, then enable it
     # when password SSH authentication is requested or there are SSH keys
-    if mycfg.get('ssh_enabled', None) is not False:
+    if ssh_enabled == "auto":
         user_ssh_keys = cloud.get_public_ssh_keys() or None
         password_auth_enabled = cfg.get('ssh_pwauth', False)
         if user_ssh_keys:
-            LOG.debug("Enabling SSH, user SSH keys provided")
+            LOG.debug("Enabling SSH, ssh keys found in datasource")
             ssh_enabled = True
+        elif cfg.get('ssh_authorized_keys'):
+            LOG.debug("Enabling SSH, ssh keys found in config")
         elif password_auth_enabled:
             LOG.debug("Enabling SSH, password authentication requested")
             ssh_enabled = True
+    elif ssh_enabled not in (True, False):
+        LOG.warn("Unknown value '%s' in ssh_enabled", ssh_enabled)
 
     disable_enable_ssh(ssh_enabled)
 
-- 
cgit v1.2.3


From 03b5cac37154476b89e67b231c2888a9cfdc92ca Mon Sep 17 00:00:00 2001
From: Ben Howard <ben.howard@ubuntu.com>
Date: Tue, 15 Sep 2015 11:53:36 -0600
Subject: Change Snappy SSH enabled default from false to 'auto' (LP: #1494816)

---
 cloudinit/config/cc_snappy.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 124452c0..fa9d54a0 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -63,7 +63,7 @@ NAMESPACE_DELIM = '.'
 BUILTIN_CFG = {
     'packages': [],
     'packages_dir': '/writable/user-data/cloud-init/snaps',
-    'ssh_enabled': False,
+    'ssh_enabled': "auto",
     'system_snappy': "auto",
     'config': {},
 }
-- 
cgit v1.2.3


From 4558922ac6d8ae129b1f47e124c6b08008e7548f Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 17 Sep 2015 15:56:51 -0400
Subject: webhook: report with json data

the handler was passing a dictionary to readurl
which was then passing that on to requests.request as 'data'.
the requests library would urlencode that, but we want the
json data posted instead.

LP: #1496960
---
 cloudinit/reporting/handlers.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/reporting/handlers.py b/cloudinit/reporting/handlers.py
index ba480da0..3212d173 100644
--- a/cloudinit/reporting/handlers.py
+++ b/cloudinit/reporting/handlers.py
@@ -1,6 +1,7 @@
 # vi: ts=4 expandtab
 
 import abc
+import json
 import six
 
 from ..registry import DictRegistry
@@ -77,7 +78,7 @@ class WebHookHandler(ReportingHandler):
             readurl = url_helper.readurl
         try:
             return readurl(
-                self.endpoint, data=event.as_dict(),
+                self.endpoint, data=json.dumps(event.as_dict()),
                 timeout=self.timeout,
                 retries=self.retries, ssl_details=self.ssl_details)
         except:
-- 
cgit v1.2.3


From e9e86164198993aca13148872afdeebaae751c2c Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 29 Sep 2015 17:17:49 -0400
Subject: MAAS: fix issues with url_helper and oauth module

This would cause problems in the event that we actually had a bad
clock.  We add a retry in the main (for test) also, to ensure that
the oauth timestamp fix gets in place.

LP: #1499869
---
 cloudinit/sources/DataSourceMAAS.py | 20 +++++++++++++-------
 cloudinit/url_helper.py             |  8 ++++++--
 2 files changed, 19 insertions(+), 9 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index 6c95c218..cfc59ca5 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -88,6 +88,10 @@ class DataSourceMAAS(sources.DataSource):
             return False
 
         try:
+            # Doing this here actually has a side effect of getting
+            # the oauth time-fix in place: nowhere else would retry
+            # by default, so even if we could fix the timestamp we
+            # would not.
             if not self.wait_for_metadata_service(url):
                 return False
 
@@ -95,7 +99,7 @@ class DataSourceMAAS(sources.DataSource):
 
             (userdata, metadata) = read_maas_seed_url(
                 self.base_url, read_file_or_url=self.oauth_helper.readurl,
-                paths=self.paths)
+                paths=self.paths, retries=1)
             self.userdata_raw = userdata
             self.metadata = metadata
             return True
@@ -161,7 +165,7 @@ def read_maas_seed_dir(seed_d):
 
 
 def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
-                       version=MD_VERSION, paths=None):
+                       version=MD_VERSION, paths=None, retries=None):
     """
     Read the maas datasource at seed_url.
       read_file_or_url is a method that should provide an interface
@@ -193,13 +197,13 @@ def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
     for name in file_order:
         url = files.get(name)
         if name == 'user-data':
-            retries = 0
+            item_retries = 0
         else:
-            retries = None
+            item_retries = retries
 
         try:
             ssl_details = util.fetch_ssl_details(paths)
-            resp = read_file_or_url(url, retries=retries,
+            resp = read_file_or_url(url, retries=item_retries,
                                     timeout=timeout, ssl_details=ssl_details)
             if resp.ok():
                 if name in BINARY_FIELDS:
@@ -306,7 +310,8 @@ if __name__ == "__main__":
         oauth_helper = url_helper.OauthUrlHelper(**creds)
 
         def geturl(url):
-            return oauth_helper.readurl(url).contents
+            # the retry is to ensure that oauth timestamp gets fixed
+            return oauth_helper.readurl(url, retries=1).contents
 
         def printurl(url):
             print("== %s ==\n%s\n" % (url, geturl(url).decode()))
@@ -329,7 +334,8 @@ if __name__ == "__main__":
             if args.url[0] == "/" or args.url.startswith("file://"):
                 readurl = None
             (userdata, metadata) = read_maas_seed_url(
-                args.url, version=args.apiver, read_file_or_url=readurl)
+                args.url, version=args.apiver, read_file_or_url=readurl,
+                retries=2)
             print("=== userdata ===")
             print(userdata.decode())
             print("=== metadata ===")
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index a93847ce..f2e1390e 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -264,7 +264,9 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
                     # ssl exceptions are not going to get fixed by waiting a
                     # few seconds
                     break
-            if exception_cb and not exception_cb(req_args.copy(), excps[-1]):
+            if exception_cb and exception_cb(req_args.copy(), excps[-1]):
+                # if an exception callback was given it should return None
+                # a true-ish value means to break and re-raise the exception
                 break
             if i + 1 < manual_tries and sec_between > 0:
                 LOG.debug("Please wait %s seconds while we wait to try again",
@@ -404,7 +406,7 @@ class OauthUrlHelper(object):
     def read_skew_file(self):
         if self.skew_data_file and os.path.isfile(self.skew_data_file):
             with open(self.skew_data_file, mode="r") as fp:
-                return json.load(fp.read())
+                return json.load(fp)
         return None
 
     def update_skew_file(self, host, value):
@@ -412,6 +414,8 @@ class OauthUrlHelper(object):
         if not self.skew_data_file:
             return
         cur = self.read_skew_file()
+        if cur is None:
+            cur = {}
         cur[host] = value
         with open(self.skew_data_file, mode="w") as fp:
             fp.write(json.dumps(cur))
-- 
cgit v1.2.3


From 41900b72f31a1bd0eebe2f58a8598bfab25f0003 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 9 Oct 2015 14:01:11 +0100
Subject: Handle escaped quotes in WALinuxAgentShim.find_endpoint.

This fixes bug 1488891.
---
 cloudinit/sources/helpers/azure.py                   |  2 +-
 tests/unittests/test_datasource/test_azure_helper.py | 10 +++++++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 281d733e..33003da0 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -233,7 +233,7 @@ class WALinuxAgentShim(object):
                 hex_string += hex_pair
             value = struct.pack('>L', int(hex_string.replace(':', ''), 16))
         else:
-            value = value.encode('utf-8')
+            value = value.replace('\\', '').encode('utf-8')
         endpoint_ip_address = socket.inet_ntoa(value)
         LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
         return endpoint_ip_address
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index a5228870..68af31cd 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -97,7 +97,8 @@ class TestFindEndpoint(TestCase):
         if not use_hex:
             ip_address_repr = struct.pack(
                 '>L', int(ip_address_repr.replace(':', ''), 16))
-            ip_address_repr = '"{0}"'.format(ip_address_repr.decode('utf-8'))
+            ip_address_repr = '"{0}"'.format(
+                ip_address_repr.decode('utf-8').replace('"', '\\"'))
         return '\n'.join([
             'lease {',
             ' interface "eth0";',
@@ -125,6 +126,13 @@ class TestFindEndpoint(TestCase):
         self.assertEqual(ip_address,
                          azure_helper.WALinuxAgentShim.find_endpoint())
 
+    def test_packed_string_with_escaped_quote(self):
+        ip_address = '100.72.34.108'
+        file_content = self._build_lease_content(ip_address, use_hex=False)
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_address,
+                         azure_helper.WALinuxAgentShim.find_endpoint())
+
     def test_latest_lease_used(self):
         ip_addresses = ['4.3.2.1', '98.76.54.32']
         file_content = '\n'.join([self._build_lease_content(ip_address)
-- 
cgit v1.2.3


From 20dc4190e27c7778cfa6c2943961f2ad27e14b48 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 9 Oct 2015 14:01:11 +0100
Subject: Handle colons in packed strings in WALinuxAgentShim.find_endpoint.

This fixes bug 1488896.
---
 cloudinit/sources/helpers/azure.py                   | 12 +++++++-----
 tests/unittests/test_datasource/test_azure_helper.py |  7 +++++++
 2 files changed, 14 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 33003da0..21b4cd21 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -225,16 +225,18 @@ class WALinuxAgentShim(object):
                 value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
         if value is None:
             raise Exception('No endpoint found in DHCP config.')
-        if ':' in value:
+        unescaped_value = value.replace('\\', '')
+        if len(unescaped_value) > 4:
             hex_string = ''
-            for hex_pair in value.split(':'):
+            for hex_pair in unescaped_value.split(':'):
                 if len(hex_pair) == 1:
                     hex_pair = '0' + hex_pair
                 hex_string += hex_pair
-            value = struct.pack('>L', int(hex_string.replace(':', ''), 16))
+            packed_bytes = struct.pack(
+                '>L', int(hex_string.replace(':', ''), 16))
         else:
-            value = value.replace('\\', '').encode('utf-8')
-        endpoint_ip_address = socket.inet_ntoa(value)
+            packed_bytes = unescaped_value.encode('utf-8')
+        endpoint_ip_address = socket.inet_ntoa(packed_bytes)
         LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
         return endpoint_ip_address
 
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 68af31cd..5f906837 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -133,6 +133,13 @@ class TestFindEndpoint(TestCase):
         self.assertEqual(ip_address,
                          azure_helper.WALinuxAgentShim.find_endpoint())
 
+    def test_packed_string_containing_a_colon(self):
+        ip_address = '100.72.58.108'
+        file_content = self._build_lease_content(ip_address, use_hex=False)
+        self.load_file.return_value = file_content
+        self.assertEqual(ip_address,
+                         azure_helper.WALinuxAgentShim.find_endpoint())
+
     def test_latest_lease_used(self):
         ip_addresses = ['4.3.2.1', '98.76.54.32']
         file_content = '\n'.join([self._build_lease_content(ip_address)
-- 
cgit v1.2.3


From d78ea2f8191847242b639f23fe085a5dd8b36014 Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 9 Oct 2015 14:01:11 +0100
Subject: Refactor WALinuxAgentShim.find_endpoint to use a helper method for IP
 address unpacking.

---
 cloudinit/sources/helpers/azure.py | 26 +++++++++++++++-----------
 1 file changed, 15 insertions(+), 11 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 21b4cd21..fd08be16 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -216,16 +216,8 @@ class WALinuxAgentShim(object):
             self.openssl_manager.clean_up()
 
     @staticmethod
-    def find_endpoint():
-        LOG.debug('Finding Azure endpoint...')
-        content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
-        value = None
-        for line in content.splitlines():
-            if 'unknown-245' in line:
-                value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
-        if value is None:
-            raise Exception('No endpoint found in DHCP config.')
-        unescaped_value = value.replace('\\', '')
+    def get_ip_from_lease_value(lease_value):
+        unescaped_value = lease_value.replace('\\', '')
         if len(unescaped_value) > 4:
             hex_string = ''
             for hex_pair in unescaped_value.split(':'):
@@ -236,7 +228,19 @@ class WALinuxAgentShim(object):
                 '>L', int(hex_string.replace(':', ''), 16))
         else:
             packed_bytes = unescaped_value.encode('utf-8')
-        endpoint_ip_address = socket.inet_ntoa(packed_bytes)
+        return socket.inet_ntoa(packed_bytes)
+
+    @staticmethod
+    def find_endpoint():
+        LOG.debug('Finding Azure endpoint...')
+        content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
+        value = None
+        for line in content.splitlines():
+            if 'unknown-245' in line:
+                value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
+        if value is None:
+            raise Exception('No endpoint found in DHCP config.')
+        endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
         LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
         return endpoint_ip_address
 
-- 
cgit v1.2.3


From 86bd318e41b0bec10765d0498a125de062afe1f9 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Fri, 9 Oct 2015 12:39:23 -0400
Subject: support configuring and installing the Ubuntu fan driver

  #cloud-config
  fan:
    config: |
      # fan 240
      10.0.0.0/8 eth0/16 dhcp
      10.0.0.0/8 eth1/16 dhcp off
      # fan 241
      241.0.0.0/8 eth0/16 dhcp
    config_path: /etc/network/fan

LP: #1504604
---
 ChangeLog                  |   1 +
 cloudinit/config/cc_fan.py | 101 +++++++++++++++++++++++++++++++++++++++++++++
 config/cloud.cfg           |   1 +
 3 files changed, 103 insertions(+)
 create mode 100644 cloudinit/config/cc_fan.py

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index bbb7e990..b7a66aa1 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -62,6 +62,7 @@
  - reporting: add reporting module for web hook or logging of events.
  - NoCloud: fix consumption of vendordata (LP: #1493453)
  - power_state_change: support 'condition' to disable or enable poweroff
+ - ubuntu fan: support for config and installing of ubuntu fan (LP: #1504604)
 0.7.6:
  - open 0.7.6
  - Enable vendordata on CloudSigma datasource (LP: #1303986)
diff --git a/cloudinit/config/cc_fan.py b/cloudinit/config/cc_fan.py
new file mode 100644
index 00000000..39e3850e
--- /dev/null
+++ b/cloudinit/config/cc_fan.py
@@ -0,0 +1,101 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2015 Canonical Ltd.
+#
+#    Author: Scott Moser <scott.moser@canonical.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+"""
+fan module allows configuration of Ubuntu Fan
+  https://wiki.ubuntu.com/FanNetworking
+
+Example config:
+  #cloud-config
+  fan:
+    config: |
+      # fan 240
+      10.0.0.0/8 eth0/16 dhcp
+      10.0.0.0/8 eth1/16 dhcp off
+      # fan 241
+      241.0.0.0/8 eth0/16 dhcp
+    config_path: /etc/network/fan
+
+If cloud-init sees a 'fan' entry in cloud-config it will
+ a.) write 'config_path' with the contents
+ b.) install the package 'ubuntu-fan' if it is not installed
+ c.) ensure the service is started (or restarted if was previously running)
+"""
+
+from cloudinit import log as logging
+from cloudinit import util
+from cloudinit.settings import PER_INSTANCE
+
+LOG = logging.getLogger(__name__)
+
+frequency = PER_INSTANCE
+
+BUILTIN_CFG = {
+    'config': None,
+    'config_path': '/etc/network/fan',
+}
+
+
+def stop_update_start(service, config_file, content, systemd=False):
+    if systemd:
+        cmds = {'stop': ['systemctl', 'stop', service],
+                'start': ['systemctl', 'start', service],
+                'enable': ['systemctl', 'enable', service]}
+    else:
+        cmds = {'stop': ['service', 'stop'],
+                'start': ['service', 'start']}
+
+    def run(cmd, msg):
+        try:
+            return util.subp(cmd, capture=True)
+        except util.ProcessExecutionError as e:
+            LOG.warn("failed: %s (%s): %s", service, cmd, e)
+            return False
+
+    stop_failed = not run(cmds['stop'], msg='stop %s' % service)
+    if not content.endswith('\n'):
+        content += '\n'
+    util.write_file(config_file, content, omode="w")
+
+    ret = run(cmds['start'], msg='start %s' % service)
+    if ret and stop_failed:
+        LOG.warn("success: %s started", service)
+
+    if 'enable' in cmds:
+        ret = run(cmds['enable'], msg='enable %s' % service)
+
+    return ret
+
+
+def handle(name, cfg, cloud, log, args):
+    cfgin = cfg.get('fan')
+    if not cfgin:
+        cfgin = {}
+    mycfg = util.mergemanydict([cfgin, BUILTIN_CFG])
+
+    if not mycfg.get('config'):
+        LOG.debug("%s: no 'fan' config entry. disabling", name)
+        return
+
+    util.write_file(mycfg.get('config_path'), mycfg.get('config'), omode="w")
+    distro = cloud.distro
+    if not util.which('fanctl'):
+        distro.install_packages(['ubuntu-fan'])
+
+    stop_update_start(
+        service='ubuntu-fan', config_file=mycfg.get('config_path'),
+        content=mycfg.get('config'), systemd=distro.uses_systemd())
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 2b27f379..74794ab0 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -53,6 +53,7 @@ cloud_config_modules:
  - apt-pipelining
  - apt-configure
  - package-update-upgrade-install
+ - fan
  - landscape
  - timezone
  - puppet
-- 
cgit v1.2.3


From 92ceca45c5d2983742ce18d2e8b2e671629ef4b0 Mon Sep 17 00:00:00 2001
From: Ben Howard <ben.howard@canonical.com>
Date: Wed, 14 Oct 2015 16:32:35 -0700
Subject: AZURE: support extracting SSH key values from ovf-env.xml  Azure has
 or will be offering shortly the ability to directly define the SSH  key value
 instead of a fingerprint in the ovf-env.xml file. This patch  favors defined
 SSH keys over the fingerprint method (LP: #1506244).

---
 cloudinit/sources/DataSourceAzure.py          | 16 ++++++---
 tests/unittests/test_datasource/test_azure.py | 52 ++++++++++++++++++++++-----
 2 files changed, 56 insertions(+), 12 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index ff950deb..eb9fd042 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -148,9 +148,15 @@ class DataSourceAzureNet(sources.DataSource):
             wait_for = [shcfgxml]
 
             fp_files = []
+            key_value = None
             for pk in self.cfg.get('_pubkeys', []):
-                bname = str(pk['fingerprint'] + ".crt")
-                fp_files += [os.path.join(ddir, bname)]
+                if pk.get('value', None):
+                    key_value = pk['value']
+                    LOG.info("ssh authentication: using value from fabric")
+                else:
+                    bname = str(pk['fingerprint'] + ".crt")
+                    fp_files += [os.path.join(ddir, bname)]
+                    LOG.info("ssh authentication: using fingerprint from fabirc")
 
             missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                                     func=wait_for_files,
@@ -166,7 +172,8 @@ class DataSourceAzureNet(sources.DataSource):
                 metadata['instance-id'] = iid_from_shared_config(shcfgxml)
             except ValueError as e:
                 LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)
-        metadata['public-keys'] = pubkeys_from_crt_files(fp_files)
+
+        metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
         return metadata
 
     def get_data(self):
@@ -497,7 +504,8 @@ def load_azure_ovf_pubkeys(sshnode):
     for pk_node in pubkeys:
         if not pk_node.hasChildNodes():
             continue
-        cur = {'fingerprint': "", 'path': ""}
+
+        cur = {'fingerprint': "", 'path': "", 'value': ""}
         for child in pk_node.childNodes:
             if child.nodeType == text_node or not child.localName:
                 continue
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 8952374f..ec0435f5 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -54,10 +54,13 @@ def construct_valid_ovf_env(data=None, pubkeys=None, userdata=None):
 
     if pubkeys:
         content += "<SSH><PublicKeys>\n"
-        for fp, path in pubkeys:
+        for fp, path, value in pubkeys:
             content += " <PublicKey>"
-            content += ("<Fingerprint>%s</Fingerprint><Path>%s</Path>" %
-                        (fp, path))
+            if fp and path:
+                content += ("<Fingerprint>%s</Fingerprint><Path>%s</Path>" %
+                            (fp, path))
+            if value:
+                content += "<Value>%s</Value>" % value
             content += "</PublicKey>\n"
         content += "</PublicKeys></SSH>"
     content += """
@@ -297,10 +300,10 @@ class TestAzureDataSource(TestCase):
         self.assertFalse(ret)
         self.assertFalse('agent_invoked' in data)
 
-    def test_cfg_has_pubkeys(self):
+    def test_cfg_has_pubkeys_fingerprint(self):
         odata = {'HostName': "myhost", 'UserName': "myuser"}
-        mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}]
-        pubkeys = [(x['fingerprint'], x['path']) for x in mypklist]
+        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
+        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
         data = {'ovfcontent': construct_valid_ovf_env(data=odata,
                                                       pubkeys=pubkeys)}
 
@@ -309,6 +312,39 @@ class TestAzureDataSource(TestCase):
         self.assertTrue(ret)
         for mypk in mypklist:
             self.assertIn(mypk, dsrc.cfg['_pubkeys'])
+            self.assertIn('pubkey_from', dsrc.metadata['public-keys'][-1])
+
+    def test_cfg_has_pubkeys_value(self):
+        # make sure that provided key is used over fingerprint
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': 'value1'}]
+        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata,
+                                                      pubkeys=pubkeys)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+
+        for mypk in mypklist:
+            self.assertIn(mypk, dsrc.cfg['_pubkeys'])
+            self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
+
+    def test_cfg_has_no_fingerprint_has_value(self):
+        # test value is used when fingerprint not provided
+        odata = {'HostName': "myhost", 'UserName': "myuser"}
+        mypklist = [{'fingerprint': None, 'path': 'path1', 'value': 'value1'}]
+        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
+        data = {'ovfcontent': construct_valid_ovf_env(data=odata,
+                                                      pubkeys=pubkeys)}
+
+        dsrc = self._get_ds(data)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+
+        for mypk in mypklist:
+            self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
+
 
     def test_default_ephemeral(self):
         # make sure the ephemeral device works
@@ -642,8 +678,8 @@ class TestReadAzureOvf(TestCase):
             DataSourceAzure.read_azure_ovf, invalid_xml)
 
     def test_load_with_pubkeys(self):
-        mypklist = [{'fingerprint': 'fp1', 'path': 'path1'}]
-        pubkeys = [(x['fingerprint'], x['path']) for x in mypklist]
+        mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
+        pubkeys = [(x['fingerprint'], x['path'], x['value']) for x in mypklist]
         content = construct_valid_ovf_env(pubkeys=pubkeys)
         (_md, _ud, cfg) = DataSourceAzure.read_azure_ovf(content)
         for mypk in mypklist:
-- 
cgit v1.2.3


From c67d5c7e5e6c96f6cd4c2587110f592089f327bd Mon Sep 17 00:00:00 2001
From: Darren Worrall <darren@iweb.co.uk>
Date: Tue, 20 Oct 2015 09:44:50 +0100
Subject: Remove --quiet option from udevadm in AltCloud

--quiet is no longer supported

LP: #1507526
---
 cloudinit/sources/DataSourceAltCloud.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index fb528ae5..60d58d6d 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -41,7 +41,7 @@ CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
 
 # Shell command lists
 CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
-CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
+CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--timeout=5']
 
 META_DATA_NOT_SUPPORTED = {
     'block-device-mapping': {},
-- 
cgit v1.2.3


From 34b208a05361ae6ab4a51a6a999c9ac4ab77f06a Mon Sep 17 00:00:00 2001
From: Daniel Watkins <daniel.watkins@canonical.com>
Date: Fri, 30 Oct 2015 16:26:31 +0000
Subject: Use DMI data to find Azure instance IDs.

This replaces the use of SharedConfig.xml in both the walinuxagent case,
and the case where we communicate with the Azure fabric ourselves.
---
 cloudinit/sources/DataSourceAzure.py               | 38 +----------
 cloudinit/sources/helpers/azure.py                 | 21 ------
 tests/unittests/test_datasource/test_azure.py      | 77 +++++-----------------
 .../unittests/test_datasource/test_azure_helper.py | 42 +-----------
 4 files changed, 23 insertions(+), 155 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index c6228e6c..bd80a8a6 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -31,8 +31,7 @@ from cloudinit import log as logging
 from cloudinit.settings import PER_ALWAYS
 from cloudinit import sources
 from cloudinit import util
-from cloudinit.sources.helpers.azure import (
-    get_metadata_from_fabric, iid_from_shared_config_content)
+from cloudinit.sources.helpers.azure import get_metadata_from_fabric
 
 LOG = logging.getLogger(__name__)
 
@@ -41,7 +40,6 @@ DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
 AGENT_START = ['service', 'walinuxagent', 'start']
 BOUNCE_COMMAND = ['sh', '-xc',
     "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]
-DATA_DIR_CLEAN_LIST = ['SharedConfig.xml']
 
 BUILTIN_DS_CONFIG = {
     'agent_command': AGENT_START,
@@ -144,8 +142,6 @@ class DataSourceAzureNet(sources.DataSource):
                             self.ds_cfg['agent_command'])
 
             ddir = self.ds_cfg['data_dir']
-            shcfgxml = os.path.join(ddir, "SharedConfig.xml")
-            wait_for = [shcfgxml]
 
             fp_files = []
             key_value = None
@@ -160,19 +156,11 @@ class DataSourceAzureNet(sources.DataSource):
 
             missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                                     func=wait_for_files,
-                                    args=(wait_for + fp_files,))
+                                    args=(fp_files,))
         if len(missing):
             LOG.warn("Did not find files, but going on: %s", missing)
 
         metadata = {}
-        if shcfgxml in missing:
-            LOG.warn("SharedConfig.xml missing, using static instance-id")
-        else:
-            try:
-                metadata['instance-id'] = iid_from_shared_config(shcfgxml)
-            except ValueError as e:
-                LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)
-
         metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
         return metadata
 
@@ -229,21 +217,6 @@ class DataSourceAzureNet(sources.DataSource):
         user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
         self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
 
-        if found != ddir:
-            cached_ovfenv = util.load_file(
-                os.path.join(ddir, 'ovf-env.xml'), quiet=True, decode=False)
-            if cached_ovfenv != files['ovf-env.xml']:
-                # source was not walinux-agent's datadir, so we have to clean
-                # up so 'wait_for_files' doesn't return early due to stale data
-                cleaned = []
-                for f in [os.path.join(ddir, f) for f in DATA_DIR_CLEAN_LIST]:
-                    if os.path.exists(f):
-                        util.del_file(f)
-                        cleaned.append(f)
-                if cleaned:
-                    LOG.info("removed stale file(s) in '%s': %s",
-                             ddir, str(cleaned))
-
         # walinux agent writes files world readable, but expects
         # the directory to be protected.
         write_files(ddir, files, dirmode=0o700)
@@ -259,6 +232,7 @@ class DataSourceAzureNet(sources.DataSource):
                      " on Azure.", exc_info=True)
             return False
 
+        self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
         self.metadata.update(fabric_data)
 
         found_ephemeral = find_fabric_formatted_ephemeral_disk()
@@ -649,12 +623,6 @@ def load_azure_ds_dir(source_dir):
     return (md, ud, cfg, {'ovf-env.xml': contents})
 
 
-def iid_from_shared_config(path):
-    with open(path, "rb") as fp:
-        content = fp.read()
-    return iid_from_shared_config_content(content)
-
-
 class BrokenAzureDataSource(Exception):
     pass
 
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
index 281d733e..d90c22fd 100644
--- a/cloudinit/sources/helpers/azure.py
+++ b/cloudinit/sources/helpers/azure.py
@@ -78,12 +78,6 @@ class GoalState(object):
         return self._text_from_xpath(
             './Container/RoleInstanceList/RoleInstance/InstanceId')
 
-    @property
-    def shared_config_xml(self):
-        url = self._text_from_xpath('./Container/RoleInstanceList/RoleInstance'
-                                    '/Configuration/SharedConfig')
-        return self.http_client.get(url).contents
-
     @property
     def certificates_xml(self):
         if self._certificates_xml is None:
@@ -172,19 +166,6 @@ class OpenSSLManager(object):
         return keys
 
 
-def iid_from_shared_config_content(content):
-    """
-    find INSTANCE_ID in:
-    <?xml version="1.0" encoding="utf-8"?>
-    <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
-    <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0">
-        <Service name="..." guid="{00000000-0000-0000-0000-000000000000}"/>
-    """
-    root = ElementTree.fromstring(content)
-    depnode = root.find('Deployment')
-    return depnode.get('name')
-
-
 class WALinuxAgentShim(object):
 
     REPORT_READY_XML_TEMPLATE = '\n'.join([
@@ -263,8 +244,6 @@ class WALinuxAgentShim(object):
             public_keys = self.openssl_manager.parse_certificates(
                 goal_state.certificates_xml)
         data = {
-            'instance-id': iid_from_shared_config_content(
-                goal_state.shared_config_xml),
             'public-keys': public_keys,
         }
         self._report_ready(goal_state, http_client)
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index ec0435f5..3933794f 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -115,10 +115,6 @@ class TestAzureDataSource(TestCase):
             data['pubkey_files'] = flist
             return ["pubkey_from: %s" % f for f in flist]
 
-        def _iid_from_shared_config(path):
-            data['iid_from_shared_cfg'] = path
-            return 'i-my-azure-id'
-
         if data.get('ovfcontent') is not None:
             populate_dir(os.path.join(self.paths.seed_dir, "azure"),
                          {'ovf-env.xml': data['ovfcontent']})
@@ -127,20 +123,22 @@ class TestAzureDataSource(TestCase):
         mod.BUILTIN_DS_CONFIG['data_dir'] = self.waagent_d
 
         self.get_metadata_from_fabric = mock.MagicMock(return_value={
-            'instance-id': 'i-my-azure-id',
             'public-keys': [],
         })
 
+        self.instance_id = 'test-instance-id'
+
         self.apply_patches([
             (mod, 'list_possible_azure_ds_devs', dsdevs),
             (mod, 'invoke_agent', _invoke_agent),
             (mod, 'wait_for_files', _wait_for_files),
             (mod, 'pubkeys_from_crt_files', _pubkeys_from_crt_files),
-            (mod, 'iid_from_shared_config', _iid_from_shared_config),
             (mod, 'perform_hostname_bounce', mock.MagicMock()),
             (mod, 'get_hostname', mock.MagicMock()),
             (mod, 'set_hostname', mock.MagicMock()),
             (mod, 'get_metadata_from_fabric', self.get_metadata_from_fabric),
+            (mod.util, 'read_dmi_data', mock.MagicMock(
+                return_value=self.instance_id)),
         ])
 
         dsrc = mod.DataSourceAzureNet(
@@ -193,7 +191,6 @@ class TestAzureDataSource(TestCase):
         self.assertEqual(dsrc.metadata['local-hostname'], odata['HostName'])
         self.assertTrue(os.path.isfile(
             os.path.join(self.waagent_d, 'ovf-env.xml')))
-        self.assertEqual(dsrc.metadata['instance-id'], 'i-my-azure-id')
 
     def test_waagent_d_has_0700_perms(self):
         # we expect /var/lib/waagent to be created 0700
@@ -345,7 +342,6 @@ class TestAzureDataSource(TestCase):
         for mypk in mypklist:
             self.assertIn(mypk['value'], dsrc.metadata['public-keys'])
 
-
     def test_default_ephemeral(self):
         # make sure the ephemeral device works
         odata = {}
@@ -434,54 +430,6 @@ class TestAzureDataSource(TestCase):
         dsrc = self._get_ds({'ovfcontent': xml})
         dsrc.get_data()
 
-    def test_existing_ovf_same(self):
-        # waagent/SharedConfig left alone if found ovf-env.xml same as cached
-        odata = {'UserData': b64e("SOMEUSERDATA")}
-        data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
-
-        populate_dir(self.waagent_d,
-            {'ovf-env.xml': data['ovfcontent'],
-             'otherfile': 'otherfile-content',
-             'SharedConfig.xml': 'mysharedconfig'})
-
-        dsrc = self._get_ds(data)
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertTrue(os.path.exists(
-            os.path.join(self.waagent_d, 'ovf-env.xml')))
-        self.assertTrue(os.path.exists(
-            os.path.join(self.waagent_d, 'otherfile')))
-        self.assertTrue(os.path.exists(
-            os.path.join(self.waagent_d, 'SharedConfig.xml')))
-
-    def test_existing_ovf_diff(self):
-        # waagent/SharedConfig must be removed if ovfenv is found elsewhere
-
-        # 'get_data' should remove SharedConfig.xml in /var/lib/waagent
-        # if ovf-env.xml differs.
-        cached_ovfenv = construct_valid_ovf_env(
-            {'userdata': b64e("FOO_USERDATA")})
-        new_ovfenv = construct_valid_ovf_env(
-            {'userdata': b64e("NEW_USERDATA")})
-
-        populate_dir(self.waagent_d,
-            {'ovf-env.xml': cached_ovfenv,
-             'SharedConfig.xml': "mysharedconfigxml",
-             'otherfile': 'otherfilecontent'})
-
-        dsrc = self._get_ds({'ovfcontent': new_ovfenv})
-        ret = dsrc.get_data()
-        self.assertTrue(ret)
-        self.assertEqual(dsrc.userdata_raw, b"NEW_USERDATA")
-        self.assertTrue(os.path.exists(
-            os.path.join(self.waagent_d, 'otherfile')))
-        self.assertFalse(os.path.exists(
-                        os.path.join(self.waagent_d, 'SharedConfig.xml')))
-        self.assertTrue(os.path.exists(
-                        os.path.join(self.waagent_d, 'ovf-env.xml')))
-        new_xml = load_file(os.path.join(self.waagent_d, 'ovf-env.xml'))
-        self.xml_equals(new_ovfenv, new_xml)
-
     def test_exception_fetching_fabric_data_doesnt_propagate(self):
         ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
         ds.ds_cfg['agent_command'] = '__builtin__'
@@ -496,6 +444,17 @@ class TestAzureDataSource(TestCase):
         self.assertTrue(ret)
         self.assertEqual('value', ds.metadata['test'])
 
+    def test_instance_id_from_dmidecode_used(self):
+        ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+        ds.get_data()
+        self.assertEqual(self.instance_id, ds.metadata['instance-id'])
+
+    def test_instance_id_from_dmidecode_used_for_builtin(self):
+        ds = self._get_ds({'ovfcontent': construct_valid_ovf_env()})
+        ds.ds_cfg['agent_command'] = '__builtin__'
+        ds.get_data()
+        self.assertEqual(self.instance_id, ds.metadata['instance-id'])
+
 
 class TestAzureBounce(TestCase):
 
@@ -504,9 +463,6 @@ class TestAzureBounce(TestCase):
             mock.patch.object(DataSourceAzure, 'invoke_agent'))
         self.patches.enter_context(
             mock.patch.object(DataSourceAzure, 'wait_for_files'))
-        self.patches.enter_context(
-            mock.patch.object(DataSourceAzure, 'iid_from_shared_config',
-                              mock.MagicMock(return_value='i-my-azure-id')))
         self.patches.enter_context(
             mock.patch.object(DataSourceAzure, 'list_possible_azure_ds_devs',
                               mock.MagicMock(return_value=[])))
@@ -521,6 +477,9 @@ class TestAzureBounce(TestCase):
         self.patches.enter_context(
             mock.patch.object(DataSourceAzure, 'get_metadata_from_fabric',
                               mock.MagicMock(return_value={})))
+        self.patches.enter_context(
+            mock.patch.object(DataSourceAzure.util, 'read_dmi_data',
+                              mock.MagicMock(return_value='test-instance-id')))
 
     def setUp(self):
         super(TestAzureBounce, self).setUp()
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index a5228870..0638c974 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -40,7 +40,7 @@ GOAL_STATE_TEMPLATE = """\
           <HostingEnvironmentConfig>
             http://100.86.192.70:80/...hostingEnvironmentConfig...
           </HostingEnvironmentConfig>
-          <SharedConfig>{shared_config_url}</SharedConfig>
+          <SharedConfig>http://100.86.192.70:80/..SharedConfig..</SharedConfig>
           <ExtensionsConfig>
             http://100.86.192.70:80/...extensionsConfig...
           </ExtensionsConfig>
@@ -55,21 +55,6 @@ GOAL_STATE_TEMPLATE = """\
 """
 
 
-class TestReadAzureSharedConfig(unittest.TestCase):
-
-    def test_valid_content(self):
-        xml = """<?xml version="1.0" encoding="utf-8"?>
-            <SharedConfig>
-             <Deployment name="MY_INSTANCE_ID">
-              <Service name="myservice"/>
-              <ServiceInstance name="INSTANCE_ID.0" guid="{abcd-uuid}" />
-             </Deployment>
-            <Incarnation number="1"/>
-            </SharedConfig>"""
-        ret = azure_helper.iid_from_shared_config_content(xml)
-        self.assertEqual("MY_INSTANCE_ID", ret)
-
-
 class TestFindEndpoint(TestCase):
 
     def setUp(self):
@@ -140,7 +125,6 @@ class TestGoalStateParsing(TestCase):
         'incarnation': 1,
         'container_id': 'MyContainerId',
         'instance_id': 'MyInstanceId',
-        'shared_config_url': 'MySharedConfigUrl',
         'certificates_url': 'MyCertificatesUrl',
     }
 
@@ -174,20 +158,9 @@ class TestGoalStateParsing(TestCase):
         goal_state = self._get_goal_state(instance_id=instance_id)
         self.assertEqual(instance_id, goal_state.instance_id)
 
-    def test_shared_config_xml_parsed_and_fetched_correctly(self):
-        http_client = mock.MagicMock()
-        shared_config_url = 'TestSharedConfigUrl'
-        goal_state = self._get_goal_state(
-            http_client=http_client, shared_config_url=shared_config_url)
-        shared_config_xml = goal_state.shared_config_xml
-        self.assertEqual(1, http_client.get.call_count)
-        self.assertEqual(shared_config_url, http_client.get.call_args[0][0])
-        self.assertEqual(http_client.get.return_value.contents,
-                         shared_config_xml)
-
     def test_certificates_xml_parsed_and_fetched_correctly(self):
         http_client = mock.MagicMock()
-        certificates_url = 'TestSharedConfigUrl'
+        certificates_url = 'TestCertificatesUrl'
         goal_state = self._get_goal_state(
             http_client=http_client, certificates_url=certificates_url)
         certificates_xml = goal_state.certificates_xml
@@ -324,8 +297,6 @@ class TestWALinuxAgentShim(TestCase):
                 azure_helper.WALinuxAgentShim, 'find_endpoint'))
         self.GoalState = patches.enter_context(
             mock.patch.object(azure_helper, 'GoalState'))
-        self.iid_from_shared_config_content = patches.enter_context(
-            mock.patch.object(azure_helper, 'iid_from_shared_config_content'))
         self.OpenSSLManager = patches.enter_context(
             mock.patch.object(azure_helper, 'OpenSSLManager'))
         patches.enter_context(
@@ -367,15 +338,6 @@ class TestWALinuxAgentShim(TestCase):
         data = shim.register_with_azure_and_fetch_data()
         self.assertEqual([], data['public-keys'])
 
-    def test_instance_id_returned_in_data(self):
-        shim = azure_helper.WALinuxAgentShim()
-        data = shim.register_with_azure_and_fetch_data()
-        self.assertEqual(
-            [mock.call(self.GoalState.return_value.shared_config_xml)],
-            self.iid_from_shared_config_content.call_args_list)
-        self.assertEqual(self.iid_from_shared_config_content.return_value,
-                         data['instance-id'])
-
     def test_correct_url_used_for_report_ready(self):
         self.find_endpoint.return_value = 'test_endpoint'
         shim = azure_helper.WALinuxAgentShim()
-- 
cgit v1.2.3


From f1db8eaa68dadaae6a591339f69994e3afb589c3 Mon Sep 17 00:00:00 2001
From: Ben Howard <ben.howard@canonical.com>
Date: Mon, 9 Nov 2015 16:40:43 -0700
Subject: With Ubuntu 15.10, "nobootwait" != "nofail". The "nobootwait" was an
 Ubuntu-specific option. This option was dropped in 15.10 (LP: #1514485).

---
 cloudinit/config/cc_mounts.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 47b63dfc..11089d8d 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -263,7 +263,11 @@ def handle_swapcfg(swapcfg):
 
 def handle(_name, cfg, cloud, log, _args):
     # fs_spec, fs_file, fs_vfstype, fs_mntops, fs-freq, fs_passno
-    defvals = [None, None, "auto", "defaults,nobootwait", "0", "2"]
+    def_mnt_opts = "defaults,nobootwait"
+    if cloud.distro.uses_systemd():
+        def_mnt_opts = "defaults,nofail"
+
+    defvals = [None, None, "auto", def_mnt_opts, "0", "2"]
     defvals = cfg.get("mount_default_fields", defvals)
 
     # these are our default set of mounts
-- 
cgit v1.2.3


From 8844ffb5988bcfbb8cfbe57d9139c3dcb8b429cc Mon Sep 17 00:00:00 2001
From: Sankar Tanguturi <stanguturi@stanguturi-rhel>
Date: Wed, 18 Nov 2015 16:03:15 -0800
Subject: Add Image Customization Parser for VMware vSphere Hypervisor Support.

This is the first changeset submitted as part of a project to
add cloud-init support for the VMware vSphere Hypervisor. This changeset
contains _only_ the changes for a simple Python parser for an
Image Customization Specification file pushed by the VMware vSphere
hypervisor into the guest VMs. In a later changeset, I will be submitting
another patch to actually detect the underlying VMware vSphere hypervisor
and do the necessary customization.
---
 cloudinit/sources/helpers/vmware/__init__.py       |  13 ++
 cloudinit/sources/helpers/vmware/imc/__init__.py   |  13 ++
 cloudinit/sources/helpers/vmware/imc/boot_proto.py |  11 +
 cloudinit/sources/helpers/vmware/imc/config.py     | 125 ++++++++++++
 .../sources/helpers/vmware/imc/config_file.py      | 221 +++++++++++++++++++++
 .../sources/helpers/vmware/imc/config_namespace.py |   5 +
 .../sources/helpers/vmware/imc/config_source.py    |   2 +
 cloudinit/sources/helpers/vmware/imc/ipv4_mode.py  |  29 +++
 cloudinit/sources/helpers/vmware/imc/nic.py        | 107 ++++++++++
 9 files changed, 526 insertions(+)
 create mode 100644 cloudinit/sources/helpers/vmware/__init__.py
 create mode 100644 cloudinit/sources/helpers/vmware/imc/__init__.py
 create mode 100644 cloudinit/sources/helpers/vmware/imc/boot_proto.py
 create mode 100644 cloudinit/sources/helpers/vmware/imc/config.py
 create mode 100644 cloudinit/sources/helpers/vmware/imc/config_file.py
 create mode 100644 cloudinit/sources/helpers/vmware/imc/config_namespace.py
 create mode 100644 cloudinit/sources/helpers/vmware/imc/config_source.py
 create mode 100644 cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
 create mode 100644 cloudinit/sources/helpers/vmware/imc/nic.py

(limited to 'cloudinit')

diff --git a/cloudinit/sources/helpers/vmware/__init__.py b/cloudinit/sources/helpers/vmware/__init__.py
new file mode 100644
index 00000000..386225d5
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/__init__.py
@@ -0,0 +1,13 @@
+# vi: ts=4 expandtab
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/vmware/imc/__init__.py b/cloudinit/sources/helpers/vmware/imc/__init__.py
new file mode 100644
index 00000000..386225d5
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/__init__.py
@@ -0,0 +1,13 @@
+# vi: ts=4 expandtab
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
new file mode 100644
index 00000000..6c3b070a
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
@@ -0,0 +1,11 @@
+# from enum import Enum
+
+class BootProto:
+    DHCP = 'dhcp'
+    STATIC = 'static'
+
+#     def __eq__(self, other):
+#         return self.name == other.name and self.value == other.value
+#
+#     def __ne__(self, other):
+#         return not self.__eq__(other)
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
new file mode 100644
index 00000000..ea0873fb
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -0,0 +1,125 @@
+from cloudinit.sources.helpers.vmware.imc.nic import Nic
+
+
+class Config:
+    DNS = 'DNS|NAMESERVER|'
+    SUFFIX = 'DNS|SUFFIX|'
+    PASS = 'PASSWORD|-PASS'
+    TIMEZONE = 'DATETIME|TIMEZONE'
+    UTC = 'DATETIME|UTC'
+    HOSTNAME = 'NETWORK|HOSTNAME'
+    DOMAINNAME = 'NETWORK|DOMAINNAME'
+
+    def __init__(self, configFile):
+        self._configFile = configFile
+
+    # Retrieves hostname.
+    #
+    # Args:
+    #   None
+    # Results:
+    #   string: hostname
+    # Throws:
+    #   None
+    @property
+    def hostName(self):
+        return self._configFile.get(Config.HOSTNAME, None)
+
+    # Retrieves domainName.
+    #
+    # Args:
+    #   None
+    # Results:
+    #   string: domainName
+    # Throws:
+    #   None
+    @property
+    def domainName(self):
+        return self._configFile.get(Config.DOMAINNAME, None)
+
+    # Retrieves timezone.
+    #
+    # Args:
+    #   None
+    # Results:
+    #   string: timezone
+    # Throws:
+    #   None
+    @property
+    def timeZone(self):
+        return self._configFile.get(Config.TIMEZONE, None)
+
+    # Retrieves whether to set time to UTC or Local.
+    #
+    # Args:
+    #   None
+    # Results:
+    #   boolean: True for yes/YES, False for no/NO, otherwise - None
+    # Throws:
+    #   None
+    @property
+    def utc(self):
+        return self._configFile.get(Config.UTC, None)
+
+    # Retrieves root password to be set.
+    #
+    # Args:
+    #   None
+    # Results:
+    #   string: base64-encoded root password or None
+    # Throws:
+    #   None
+    @property
+    def adminPassword(self):
+        return self._configFile.get(Config.PASS, None)
+
+    # Retrieves DNS Servers.
+    #
+    # Args:
+    #   None
+    # Results:
+    #   list: DNS server addresses or []
+    # Throws:
+    #   None
+    @property
+    def nameServers(self):
+        res = []
+        for i in range(1, self._configFile.getCnt(Config.DNS) + 1):
+            key = Config.DNS + str(i)
+            res.append(self._configFile[key])
+
+        return res
+
+    # Retrieves DNS Suffixes.
+    #
+    # Args:
+    #   None
+    # Results:
+    #   list: DNS suffixes or []
+    # Throws:
+    #   None
+    @property
+    def dnsSuffixes(self):
+        res = []
+        for i in range(1, self._configFile.getCnt(Config.SUFFIX) + 1):
+            key = Config.SUFFIX + str(i)
+            res.append(self._configFile[key])
+
+        return res
+
+    # Retrieves NICs.
+    #
+    # Args:
+    #   None
+    # Results:
+    #   list: Nic objects for each configured NIC
+    # Throws:
+    #   None
+    @property
+    def nics(self):
+        res = []
+        nics = self._configFile['NIC-CONFIG|NICS']
+        for nic in nics.split(','):
+            res.append(Nic(nic, self._configFile))
+
+        return res
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
new file mode 100644
index 00000000..3f9938da
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -0,0 +1,221 @@
+import logging
+import re
+
+from cloudinit.sources.helpers.vmware.imc.config_source import ConfigSource
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigFile(ConfigSource):
+    def __init__(self):
+        self._configData = {}
+
+    def __getitem__(self, key):
+        return self._configData[key]
+
+    def get(self, key, default=None):
+        return self._configData.get(key, default)
+
+    # Removes all the properties.
+    #
+    # Args:
+    #   None
+    # Results:
+    #   None
+    # Throws:
+    #   None
+    def clear(self):
+        self._configData.clear()
+
+    # Inserts k/v pair.
+    #
+    # Does not do any key/cross-key validation.
+    #
+    # Args:
+    #   key: string: key
+    #   val: string: value
+    # Results:
+    #   None
+    # Throws:
+    #   None
+    def _insertKey(self, key, val):
+        # cleaning up on all "input" path
+
+        # remove end char \n (chomp)
+        key = key.strip()
+        val = val.strip()
+
+        if key.startswith('-') or '|-' in key:
+            canLog = 0
+        else:
+            canLog = 1
+
+        # "sensitive" settings shall not be logged
+        if canLog:
+            logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val))
+        else:
+            logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key)
+
+        self._configData[key] = val
+
+    # Determines properties count.
+    #
+    # Args:
+    #   None
+    # Results:
+    #   integer: properties count
+    # Throws:
+    #   None
+    def size(self):
+        return len(self._configData)
+
+    # Parses properties from a .cfg file content.
+    #
+    # Any previously available properties will be removed.
+    #
+    # Sensitive data will not be logged in case key starts from '-'.
+    #
+    # Args:
+    #   content: string: e.g. content of config/cust.cfg
+    # Results:
+    #   None
+    # Throws:
+    #   None
+    def loadConfigContent(self, content):
+        self.clear()
+
+        # remove end char \n (chomp)
+        for line in content.split('\n'):
+            # TODO validate against allowed characters (not done in Perl)
+
+            # spaces at the end are not allowed, things like passwords must be
+            # at least base64-encoded
+            line = line.strip()
+
+            # "sensitive" settings shall not be logged
+            if line.startswith('-'):
+                canLog = 0
+            else:
+                canLog = 1
+
+            if canLog:
+                logger.debug("Processing line: '%s'" % line)
+            else:
+                logger.debug("Processing line: '***********************'")
+
+            if not line:
+                logger.debug("Empty line. Ignored.")
+                continue
+
+            if line.startswith('#'):
+                logger.debug("Comment found. Line ignored.")
+                continue
+
+            matchObj = re.match(r'\[(.+)\]', line)
+            if matchObj:
+                category = matchObj.group(1)
+                logger.debug("FOUND CATEGORY = '%s'" % category)
+            else:
+                # POSIX.2 regex doesn't support non-greedy like in (.+?)=(.*)
+                # key value pair (non-eager '=' for base64)
+                matchObj = re.match(r'([^=]+)=(.*)', line)
+                if matchObj:
+                    # cleaning up on all "input" paths
+                    key = category + "|" + matchObj.group(1).strip()
+                    val = matchObj.group(2).strip()
+
+                    self._insertKey(key, val)
+                else:
+                    # TODO document
+                    raise Exception("Unrecognizable line: '%s'" % line)
+
+        self.validate()
+
+    # Parses properties from a .cfg file
+    #
+    # Any previously available properties will be removed.
+    #
+    # Sensitive data will not be logged in case key starts from '-'.
+    #
+    # Args:
+    #   filename: string: full path to a .cfg file
+    # Results:
+    #   None
+    # Throws:
+    #   None
+    def loadConfigFile(self, filename):
+        logger.info("Opening file name %s." % filename)
+        # TODO what throws?
+        with open(filename, "r") as myfile:
+            self.loadConfigContent(myfile.read())
+
+    # Determines whether a property with a given key exists.
+    #
+    # Args:
+    #   key: string: key
+    # Results:
+    #   boolean: True if such property exists, otherwise - False.
+    # Throws:
+    #   None
+    def hasKey(self, key):
+        return key in self._configData
+
+    # Determines whether a value for a property must be kept.
+    #
+    # If the property is missing, it is treated as if it should not be changed
+    # by the engine.
+    #
+    # Args:
+    #   key: string: key
+    # Results:
+    #   boolean: True if property must be kept, otherwise - False.
+    # Throws:
+    #   None
+    def keepCurrentValue(self, key):
+        # helps to distinguish from "empty" value which is used to indicate
+        # "removal"
+        return not self.hasKey(key)
+
+    # Determines whether a value for a property must be removed.
+    #
+    # If the property is empty, it's treated as it should be removed by the
+    # engine.
+    #
+    # Args:
+    #   key: string: key
+    # Results:
+    #   boolean: True if property must be removed, otherwise - False.
+    # Throws:
+    #   None
+    def removeCurrentValue(self, key):
+        # helps to distinguish from "missing" value which is used to indicate
+        # "keeping unchanged"
+        if self.hasKey(key):
+            return not bool(self._configData[key])
+        else:
+            return False
+
+    # TODO
+    def getCnt(self, prefix):
+        res = 0
+        for key in self._configData.keys():
+            if key.startswith(prefix):
+                res += 1
+
+        return res
+
+    # TODO
+    # TODO pass base64
+    # Throws:
+    #   Dies in case timezone is present but empty.
+    #   Dies in case password is present but empty.
+    #   Dies in case hostname is present but empty or greater than 63 chars.
+    #   Dies in case UTC is present, but is not yes/YES or no/NO.
+    #   Dies in case NICS is not present.
+    def validate(self):
+        # TODO must log all the errors
+        keyValidators = {'NIC1|IPv6GATEWAY|': None}
+        crossValidators = {}
+
+        for key in self._configData.keys():
+            pass
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
new file mode 100644
index 00000000..7f76ac8b
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -0,0 +1,5 @@
+from cloudinit.sources.helpers.vmware.imc.config_source import ConfigSource
+
+
+class ConfigNamespace(ConfigSource):
+    pass
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
new file mode 100644
index 00000000..fad3a389
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -0,0 +1,2 @@
+class ConfigSource:
+    pass
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
new file mode 100644
index 00000000..66b4fad7
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
@@ -0,0 +1,29 @@
+# from enum import Enum
+
+
+# The IPv4 configuration mode which directly represents the user's goal.
+#
+# This mode effectively acts as a contract of the inguest customization engine.
+# It must be set based on what the user has requested via VMODL/generators API
+# and should not be changed by those layers. It's up to the in-guest engine to
+# interpret and materialize the user's request.
+#
+# Also defined in linuxconfiggenerator.h.
+class Ipv4Mode:
+    # The legacy mode which only allows dhcp/static based on whether IPv4
+    # addresses list is empty or not
+    IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
+    # IPv4 must use static address. Reserved for future use
+    IPV4_MODE_STATIC = 'STATIC'
+    # IPv4 must use DHCPv4. Reserved for future use
+    IPV4_MODE_DHCP = 'DHCP'
+    # IPv4 must be disabled
+    IPV4_MODE_DISABLED = 'DISABLED'
+    # IPv4 settings should be left untouched. Reserved for future use
+    IPV4_MODE_AS_IS = 'AS_IS'
+
+    # def __eq__(self, other):
+    #     return self.name == other.name and self.value == other.value
+    #
+    # def __ne__(self, other):
+    #     return not self.__eq__(other)
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
new file mode 100644
index 00000000..b90a5640
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -0,0 +1,107 @@
+from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProto
+
+
+class Nic:
+    def __init__(self, name, configFile):
+        self._name = name
+        self._configFile = configFile
+
+    def _get(self, what):
+        return self._configFile.get(self.name + what, None)
+
+    def _getCnt(self, prefix):
+        return self._configFile.getCnt(self.name + prefix)
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def mac(self):
+        return self._get('|MACADDR').lower()
+
+    @property
+    def bootProto(self):
+        return self._get('|BOOTPROTO').lower()
+
+    @property
+    def ipv4(self):
+        # TODO implement NONE
+        if self.bootProto == BootProto.STATIC:
+            return StaticIpv4Conf(self)
+
+        return DhcpIpv4Conf(self)
+
+    @property
+    def ipv6(self):
+        # TODO implement NONE
+        cnt = self._getCnt("|IPv6ADDR|")
+
+        if cnt != 0:
+            return StaticIpv6Conf(self)
+
+        return DhcpIpv6Conf(self)
+
+
+class DhcpIpv4Conf:
+    def __init__(self, nic):
+        self._nic = nic
+
+
+class StaticIpv4Addr:
+    def __init__(self, nic):
+        self._nic = nic
+
+    @property
+    def ip(self):
+        return self._nic._get('|IPADDR')
+
+    @property
+    def netmask(self):
+        return self._nic._get('|NETMASK')
+
+    @property
+    def gateway(self):
+        return self._nic._get('|GATEWAY')
+
+
+class StaticIpv4Conf(DhcpIpv4Conf):
+    @property
+    def addrs(self):
+        return [StaticIpv4Addr(self._nic)]
+
+
+class DhcpIpv6Conf:
+    def __init__(self, nic):
+        self._nic = nic
+
+
+class StaticIpv6Addr:
+    def __init__(self, nic, index):
+        self._nic = nic
+        self._index = index
+
+    @property
+    def ip(self):
+        return self._nic._get("|IPv6ADDR|" + str(self._index))
+
+    @property
+    def prefix(self):
+        return self._nic._get("|IPv6NETMASK|" + str(self._index))
+
+    @property
+    def gateway(self):
+        return self._nic._get("|IPv6GATEWAY|" + str(self._index))
+
+
+class StaticIpv6Conf(DhcpIpv6Conf):
+    @property
+    def addrs(self):
+        cnt = self._nic._getCnt("|IPv6ADDR|")
+
+        res = []
+
+        for i in range(1, cnt + 1):
+            res.append(StaticIpv6Addr(self._nic, i))
+
+        return res
-- 
cgit v1.2.3


From 8d9e5bd7fcda8f56a4fe087150db1456af738335 Mon Sep 17 00:00:00 2001
From: Sankar Tanguturi <stanguturi@stanguturi-rhel>
Date: Tue, 5 Jan 2016 12:05:11 -0800
Subject:  Fixed all the styling nits.  Used proper naming convention for the
 methods.  Added proper documentation.  Checked pep8 and flake8 output and no
 issues were reported.

---
 cloudinit/sources/helpers/vmware/imc/boot_proto.py |  28 +-
 cloudinit/sources/helpers/vmware/imc/config.py     | 116 +++----
 .../sources/helpers/vmware/imc/config_file.py      | 372 +++++++++------------
 .../sources/helpers/vmware/imc/config_namespace.py |  22 +-
 .../sources/helpers/vmware/imc/config_source.py    |  21 ++
 cloudinit/sources/helpers/vmware/imc/ipv4_mode.py  |  74 ++--
 cloudinit/sources/helpers/vmware/imc/nic.py        | 254 ++++++++------
 7 files changed, 448 insertions(+), 439 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
index 6c3b070a..abfffd75 100644
--- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py
+++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
@@ -1,11 +1,25 @@
-# from enum import Enum
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2015 Canonical Ltd.
+#    Copyright (C) 2015 VMware Inc.
+#
+#    Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
 
 class BootProto:
+    """Specifies the NIC Boot Settings."""
+
     DHCP = 'dhcp'
     STATIC = 'static'
-
-#     def __eq__(self, other):
-#         return self.name == other.name and self.value == other.value
-#
-#     def __ne__(self, other):
-#         return not self.__eq__(other)
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index ea0873fb..7eee47a5 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -1,122 +1,90 @@
-from cloudinit.sources.helpers.vmware.imc.nic import Nic
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2015 Canonical Ltd.
+#    Copyright (C) 2015 VMware Inc.
+#
+#    Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from .nic import Nic
 
 
 class Config:
+    """
+    Stores the Contents specified in the Customization
+    Specification file.
+    """
+
     DNS = 'DNS|NAMESERVER|'
     SUFFIX = 'DNS|SUFFIX|'
     PASS = 'PASSWORD|-PASS'
     TIMEZONE = 'DATETIME|TIMEZONE'
     UTC = 'DATETIME|UTC'
     HOSTNAME = 'NETWORK|HOSTNAME'
-    OMAINNAME = 'NETWORK|DOMAINNAME'
+    DOMAINNAME = 'NETWORK|DOMAINNAME'
 
     def __init__(self, configFile):
         self._configFile = configFile
 
-    # Retrieves hostname.
-    #
-    # Args:
-    #   None
-    # Results:
-    #   string: hostname
-    # Throws:
-    #   None
     @property
-    def hostName(self):
+    def host_name(self):
+        """Return the hostname."""
         return self._configFile.get(Config.HOSTNAME, None)
 
-    # Retrieves domainName.
-    #
-    # Args:
-    #   None
-    # Results:
-    #   string: domainName
-    # Throws:
-    #   None
     @property
-    def domainName(self):
+    def domain_name(self):
+        """Return the domain name."""
         return self._configFile.get(Config.DOMAINNAME, None)
 
-    # Retrieves timezone.
-    #
-    # Args:
-    #   None
-    # Results:
-    #   string: timezone
-    # Throws:
-    #   None
     @property
-    def timeZone(self):
+    def timezone(self):
+        """Return the timezone."""
         return self._configFile.get(Config.TIMEZONE, None)
 
-    # Retrieves whether to set time to UTC or Local.
-    #
-    # Args:
-    #   None
-    # Results:
-    #   boolean: True for yes/YES, True for no/NO, otherwise - None
-    # Throws:
-    #   None
     @property
     def utc(self):
+        """Retrieves whether to set time to UTC or Local."""
         return self._configFile.get(Config.UTC, None)
 
-    # Retrieves root password to be set.
-    #
-    # Args:
-    #   None
-    # Results:
-    #   string: base64-encoded root password or None
-    # Throws:
-    #   None
     @property
-    def adminPassword(self):
+    def admin_password(self):
+        """Return the root password to be set."""
         return self._configFile.get(Config.PASS, None)
 
-    # Retrieves DNS Servers.
-    #
-    # Args:
-    #   None
-    # Results:
-    #   integer: count or 0
-    # Throws:
-    #   None
     @property
-    def nameServers(self):
+    def name_servers(self):
+        """Return the list of DNS servers."""
         res = []
-        for i in range(1, self._configFile.getCnt(Config.DNS) + 1):
+        for i in range(1, self._configFile.get_count(Config.DNS) + 1):
             key = Config.DNS + str(i)
             res.append(self._configFile[key])
 
         return res
 
-    # Retrieves DNS Suffixes.
-    #
-    # Args:
-    #   None
-    # Results:
-    #   integer: count or 0
-    # Throws:
-    #   None
     @property
-    def dnsSuffixes(self):
+    def dns_suffixes(self):
+        """Return the list of DNS Suffixes."""
         res = []
-        for i in range(1, self._configFile.getCnt(Config.SUFFIX) + 1):
+        for i in range(1, self._configFile.get_count(Config.SUFFIX) + 1):
             key = Config.SUFFIX + str(i)
             res.append(self._configFile[key])
 
         return res
 
-    # Retrieves NICs.
-    #
-    # Args:
-    #   None
-    # Results:
-    #   integer: count
-    # Throws:
-    #   None
     @property
     def nics(self):
+        """Return the list of associated NICs."""
         res = []
         nics = self._configFile['NIC-CONFIG|NICS']
         for nic in nics.split(','):
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 3f9938da..e08a2a9a 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -1,221 +1,151 @@
-import logging
-import re
-
-from cloudinit.sources.helpers.vmware.imc.config_source import ConfigSource
-
-logger = logging.getLogger(__name__)
-
-
-class ConfigFile(ConfigSource):
-    def __init__(self):
-        self._configData = {}
-
-    def __getitem__(self, key):
-        return self._configData[key]
-
-    def get(self, key, default=None):
-        return self._configData.get(key, default)
-
-    # Removes all the properties.
-    #
-    # Args:
-    #   None
-    # Results:
-    #   None
-    # Throws:
-    #   None
-    def clear(self):
-        self._configData.clear()
-
-    # Inserts k/v pair.
-    #
-    # Does not do any key/cross-key validation.
-    #
-    # Args:
-    #   key: string: key
-    #   val: string: value
-    # Results:
-    #   None
-    # Throws:
-    #   None
-    def _insertKey(self, key, val):
-        # cleaning up on all "input" path
-
-        # remove end char \n (chomp)
-        key = key.strip()
-        val = val.strip()
-
-        if key.startswith('-') or '|-' in key:
-            canLog = 0
-        else:
-            canLog = 1
-
-        # "sensitive" settings shall not be logged
-        if canLog:
-            logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val))
-        else:
-            logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key)
-
-        self._configData[key] = val
-
-    # Determines properties count.
-    #
-    # Args:
-    #   None
-    # Results:
-    #   integer: properties count
-    # Throws:
-    #   None
-    def size(self):
-        return len(self._configData)
-
-    # Parses properties from a .cfg file content.
-    #
-    # Any previously available properties will be removed.
-    #
-    # Sensitive data will not be logged in case key starts from '-'.
-    #
-    # Args:
-    #   content: string: e.g. content of config/cust.cfg
-    # Results:
-    #   None
-    # Throws:
-    #   None
-    def loadConfigContent(self, content):
-        self.clear()
-
-        # remove end char \n (chomp)
-        for line in content.split('\n'):
-            # TODO validate against allowed characters (not done in Perl)
-
-            # spaces at the end are not allowed, things like passwords must be
-            # at least base64-encoded
-            line = line.strip()
-
-            # "sensitive" settings shall not be logged
-            if line.startswith('-'):
-                canLog = 0
-            else:
-                canLog = 1
-
-            if canLog:
-                logger.debug("Processing line: '%s'" % line)
-            else:
-                logger.debug("Processing line: '***********************'")
-
-            if not line:
-                logger.debug("Empty line. Ignored.")
-                continue
-
-            if line.startswith('#'):
-                logger.debug("Comment found. Line ignored.")
-                continue
-
-            matchObj = re.match(r'\[(.+)\]', line)
-            if matchObj:
-                category = matchObj.group(1)
-                logger.debug("FOUND CATEGORY = '%s'" % category)
-            else:
-                # POSIX.2 regex doesn't support non-greedy like in (.+?)=(.*)
-                # key value pair (non-eager '=' for base64)
-                matchObj = re.match(r'([^=]+)=(.*)', line)
-                if matchObj:
-                    # cleaning up on all "input" paths
-                    key = category + "|" + matchObj.group(1).strip()
-                    val = matchObj.group(2).strip()
-
-                    self._insertKey(key, val)
-                else:
-                    # TODO document
-                    raise Exception("Unrecognizable line: '%s'" % line)
-
-        self.validate()
-
-    # Parses properties from a .cfg file
-    #
-    # Any previously available properties will be removed.
-    #
-    # Sensitive data will not be logged in case key starts from '-'.
-    #
-    # Args:
-    #   filename: string: full path to a .cfg file
-    # Results:
-    #   None
-    # Throws:
-    #   None
-    def loadConfigFile(self, filename):
-        logger.info("Opening file name %s." % filename)
-        # TODO what throws?
-        with open(filename, "r") as myfile:
-            self.loadConfigContent(myfile.read())
-
-    # Determines whether a property with a given key exists.
-    #
-    # Args:
-    #   key: string: key
-    # Results:
-    #   boolean: True if such property exists, otherwise - False.
-    # Throws:
-    #   None
-    def hasKey(self, key):
-        return key in self._configData
-
-    # Determines whether a value for a property must be kept.
-    #
-    # If the property is missing, it's treated as it should be not changed by
-    # the engine.
-    #
-    # Args:
-    #   key: string: key
-    # Results:
-    #   boolean: True if property must be kept, otherwise - False.
-    # Throws:
-    #   None
-    def keepCurrentValue(self, key):
-        # helps to distinguish from "empty" value which is used to indicate
-        # "removal"
-        return not self.hasKey(key)
-
-    # Determines whether a value for a property must be removed.
-    #
-    # If the property is empty, it's treated as it should be removed by the
-    # engine.
-    #
-    # Args:
-    #   key: string: key
-    # Results:
-    #   boolean: True if property must be removed, otherwise - False.
-    # Throws:
-    #   None
-    def removeCurrentValue(self, key):
-        # helps to distinguish from "missing" value which is used to indicate
-        # "keeping unchanged"
-        if self.hasKey(key):
-            return not bool(self._configData[key])
-        else:
-            return False
-
-    # TODO
-    def getCnt(self, prefix):
-        res = 0
-        for key in self._configData.keys():
-            if key.startswith(prefix):
-                res += 1
-
-        return res
-
-    # TODO
-    # TODO pass base64
-    # Throws:
-    #   Dies in case timezone is present but empty.
-    #   Dies in case password is present but empty.
-    #   Dies in case hostname is present but empty or greater than 63 chars.
-    #   Dies in case UTC is present, but is not yes/YES or no/NO.
-    #   Dies in case NICS is not present.
-    def validate(self):
-        # TODO must log all the errors
-        keyValidators = {'NIC1|IPv6GATEWAY|': None}
-        crossValidators = {}
-
-        for key in self._configData.keys():
-            pass
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2015 Canonical Ltd.
+#    Copyright (C) 2015 VMware Inc.
+#
+#    Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+
+try:
+    import configparser
+except ImportError:
+    import ConfigParser as configparser
+
+from .config_source import ConfigSource
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigFile(ConfigSource, dict):
+    """ConfigFile module to load the content from a specified source."""
+
+    def __init__(self):
+        pass
+
+    def _insertKey(self, key, val):
+        """
+        Inserts a Key Value pair.
+
+        Keyword arguments:
+        key -- The key to insert
+        val -- The value to insert for the key
+
+        """
+        key = key.strip()
+        val = val.strip()
+
+        if key.startswith('-') or '|-' in key:
+            canLog = 0
+        else:
+            canLog = 1
+
+        # "sensitive" settings shall not be logged
+        if canLog:
+            logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val))
+        else:
+            logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key)
+
+        self[key] = val
+
+    def size(self):
+        """Return the number of properties present."""
+        return len(self)
+
+    def loadConfigFile(self, filename):
+        """
+        Parses properties from the specified config file.
+
+        Any previously available properties will be removed.
+        Sensitive data will not be logged in case the key starts
+        from '-'.
+
+        Keyword arguments:
+        filename - The full path to the config file.
+        """
+        logger.info('Parsing the config file %s.' % filename)
+
+        config = configparser.ConfigParser()
+        config.optionxform = str
+        config.read(filename)
+
+        self.clear()
+
+        for category in config.sections():
+            logger.debug("FOUND CATEGORY = '%s'" % category)
+
+            for (key, value) in config.items(category):
+                # "sensitive" settings shall not be logged
+                if key.startswith('-'):
+                    canLog = 0
+                else:
+                    canLog = 1
+
+                if canLog:
+                    logger.debug("Processing key, value: '%s':'%s'" %
+                                 (key, value))
+                else:
+                    logger.debug("Processing key, value : "
+                                 "'*********************'")
+
+                self._insertKey(category + '|' + key, value)
+
+    def keep_current_value(self, key):
+        """
+        Determines whether a value for a property must be kept.
+
+        If the property is missing, it is treated as if it should not be
+        changed by the engine.
+
+        Keyword arguments:
+        key -- The key to search for.
+        """
+        # helps to distinguish from "empty" value which is used to indicate
+        # "removal"
+        return not key in self
+
+    def remove_current_value(self, key):
+        """
+        Determines whether a value for the property must be removed.
+
+        If the specified key is empty, it is treated as it should be
+        removed by the engine.
+
+        Return true if the value can be removed, false otherwise.
+
+        Keyword arguments:
+        key -- The key to search for.
+        """
+        # helps to distinguish from "missing" value which is used to indicate
+        # "keeping unchanged"
+        if key in self:
+            return not bool(self[key])
+        else:
+            return False
+
+    def get_count(self, prefix):
+        """
+        Return the total number of keys that start with the
+        specified prefix.
+
+        Keyword arguments:
+        prefix -- prefix of the key
+        """
+        res = 0
+        for key in self.keys():
+            if key.startswith(prefix):
+                res += 1
+
+        return res
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
index 7f76ac8b..7266b699 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_namespace.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -1,5 +1,25 @@
-from cloudinit.sources.helpers.vmware.imc.config_source import ConfigSource
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2015 Canonical Ltd.
+#    Copyright (C) 2015 VMware Inc.
+#
+#    Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from .config_source import ConfigSource
 
 
 class ConfigNamespace(ConfigSource):
+    """Specifies the Config Namespace."""
     pass
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
index fad3a389..a367e476 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_source.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -1,2 +1,23 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2015 Canonical Ltd.
+#    Copyright (C) 2015 VMware Inc.
+#
+#    Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
 class ConfigSource:
+    """Specifies a source for the Config Content."""
     pass
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
index 66b4fad7..28544e4f 100644
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
@@ -1,29 +1,45 @@
-# from enum import Enum
-
-
-# The IPv4 configuration mode which directly represents the user's goal.
-#
-# This mode effectively acts as a contract of the inguest customization engine.
-# It must be set based on what the user has requested via VMODL/generators API
-# and should not be changed by those layers. It's up to the in-guest engine to
-# interpret and materialize the user's request.
-#
-# Also defined in linuxconfiggenerator.h.
-class Ipv4Mode:
-    # The legacy mode which only allows dhcp/static based on whether IPv4
-    # addresses list is empty or not
-    IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
-    # IPv4 must use static address. Reserved for future use
-    IPV4_MODE_STATIC = 'STATIC'
-    # IPv4 must use DHCPv4. Reserved for future use
-    IPV4_MODE_DHCP = 'DHCP'
-    # IPv4 must be disabled
-    IPV4_MODE_DISABLED = 'DISABLED'
-    # IPv4 settings should be left untouched. Reserved for future use
-    IPV4_MODE_AS_IS = 'AS_IS'
-
-    # def __eq__(self, other):
-    #     return self.name == other.name and self.value == other.value
-    #
-    # def __ne__(self, other):
-    #     return not self.__eq__(other)
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2015 Canonical Ltd.
+#    Copyright (C) 2015 VMware Inc.
+#
+#    Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+class Ipv4Mode:
+    """
+    The IPv4 configuration mode which directly represents the user's goal.
+
+    This mode effectively acts as a contract of the in-guest customization
+    engine. It must be set based on what the user has requested and should
+    not be changed by those layers. It's up to the in-guest engine to
+    interpret and materialize the user's request.
+    """
+
+    # The legacy mode which only allows dhcp/static based on whether IPv4
+    # addresses list is empty or not
+    IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
+
+    # IPv4 must use static address. Reserved for future use
+    IPV4_MODE_STATIC = 'STATIC'
+
+    # IPv4 must use DHCPv4. Reserved for future use
+    IPV4_MODE_DHCP = 'DHCP'
+
+    # IPv4 must be disabled
+    IPV4_MODE_DISABLED = 'DISABLED'
+
+    # IPv4 settings should be left untouched. Reserved for future use
+    IPV4_MODE_AS_IS = 'AS_IS'
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
index b90a5640..bb45a9e6 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -1,107 +1,147 @@
-from cloudinit.sources.helpers.vmware.imc.boot_proto import BootProto
-
-
-class Nic:
-    def __init__(self, name, configFile):
-        self._name = name
-        self._configFile = configFile
-
-    def _get(self, what):
-        return self._configFile.get(self.name + what, None)
-
-    def _getCnt(self, prefix):
-        return self._configFile.getCnt(self.name + prefix)
-
-    @property
-    def name(self):
-        return self._name
-
-    @property
-    def mac(self):
-        return self._get('|MACADDR').lower()
-
-    @property
-    def bootProto(self):
-        return self._get('|BOOTPROTO').lower()
-
-    @property
-    def ipv4(self):
-        # TODO implement NONE
-        if self.bootProto == BootProto.STATIC:
-            return StaticIpv4Conf(self)
-
-        return DhcpIpv4Conf(self)
-
-    @property
-    def ipv6(self):
-        # TODO implement NONE
-        cnt = self._getCnt("|IPv6ADDR|")
-
-        if cnt != 0:
-            return StaticIpv6Conf(self)
-
-        return DhcpIpv6Conf(self)
-
-
-class DhcpIpv4Conf:
-    def __init__(self, nic):
-        self._nic = nic
-
-
-class StaticIpv4Addr:
-    def __init__(self, nic):
-        self._nic = nic
-
-    @property
-    def ip(self):
-        return self._nic._get('|IPADDR')
-
-    @property
-    def netmask(self):
-        return self._nic._get('|NETMASK')
-
-    @property
-    def gateway(self):
-        return self._nic._get('|GATEWAY')
-
-
-class StaticIpv4Conf(DhcpIpv4Conf):
-    @property
-    def addrs(self):
-        return [StaticIpv4Addr(self._nic)]
-
-
-class DhcpIpv6Conf:
-    def __init__(self, nic):
-        self._nic = nic
-
-
-class StaticIpv6Addr:
-    def __init__(self, nic, index):
-        self._nic = nic
-        self._index = index
-
-    @property
-    def ip(self):
-        return self._nic._get("|IPv6ADDR|" + str(self._index))
-
-    @property
-    def prefix(self):
-        return self._nic._get("|IPv6NETMASK|" + str(self._index))
-
-    @property
-    def gateway(self):
-        return self._nic._get("|IPv6GATEWAY|" + str(self._index))
-
-
-class StaticIpv6Conf(DhcpIpv6Conf):
-    @property
-    def addrs(self):
-        cnt = self._nic._getCnt("|IPv6ADDR|")
-
-        res = []
-
-        for i in range(1, cnt + 1):
-            res.append(StaticIpv6Addr(self._nic, i))
-
-        return res
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2015 Canonical Ltd.
+#    Copyright (C) 2015 VMware Inc.
+#
+#    Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+from .boot_proto import BootProto
+
+
+class Nic:
+    """
+    Holds the information about each NIC specified
+    in the customization specification file
+    """
+
+    def __init__(self, name, configFile):
+        self._name = name
+        self._configFile = configFile
+
+    def _get(self, what):
+        return self._configFile.get(self.name + what, None)
+
+    def _get_count(self, prefix):
+        return self._configFile.get_count(self.name + prefix)
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def mac(self):
+        return self._get('|MACADDR').lower()
+
+    @property
+    def bootProto(self):
+        return self._get('|BOOTPROTO').lower()
+
+    @property
+    def ipv4(self):
+        """
+        Retrieves the DHCP or Static IPv4 configuration
+        based on the BOOTPROTO property associated with the NIC
+        """
+        if self.bootProto == BootProto.STATIC:
+            return StaticIpv4Conf(self)
+
+        return DhcpIpv4Conf(self)
+
+    @property
+    def ipv6(self):
+        cnt = self._get_count("|IPv6ADDR|")
+
+        if cnt != 0:
+            return StaticIpv6Conf(self)
+
+        return DhcpIpv6Conf(self)
+
+
+class DhcpIpv4Conf:
+    """DHCP Configuration Setting."""
+
+    def __init__(self, nic):
+        self._nic = nic
+
+
+class StaticIpv4Addr:
+    """Static IPV4  Setting."""
+
+    def __init__(self, nic):
+        self._nic = nic
+
+    @property
+    def ip(self):
+        return self._nic._get('|IPADDR')
+
+    @property
+    def netmask(self):
+        return self._nic._get('|NETMASK')
+
+    @property
+    def gateway(self):
+        return self._nic._get('|GATEWAY')
+
+
+class StaticIpv4Conf(DhcpIpv4Conf):
+    """Static IPV4 Configuration."""
+
+    @property
+    def addrs(self):
+        """Return the list of associated IPv4 addresses."""
+        return [StaticIpv4Addr(self._nic)]
+
+
+class DhcpIpv6Conf:
+    """DHCP IPV6 Configuration."""
+
+    def __init__(self, nic):
+        self._nic = nic
+
+
+class StaticIpv6Addr:
+    """Static IPV6 Address."""
+
+    def __init__(self, nic, index):
+        self._nic = nic
+        self._index = index
+
+    @property
+    def ip(self):
+        return self._nic._get("|IPv6ADDR|" + str(self._index))
+
+    @property
+    def prefix(self):
+        return self._nic._get("|IPv6NETMASK|" + str(self._index))
+
+    @property
+    def gateway(self):
+        return self._nic._get("|IPv6GATEWAY|" + str(self._index))
+
+
+class StaticIpv6Conf(DhcpIpv6Conf):
+    """Static IPV6 Configuration."""
+
+    @property
+    def addrs(self):
+        """Return the list of associated IPv6 addresses."""
+        cnt = self._nic._get_count("|IPv6ADDR|")
+
+        res = []
+
+        for i in range(1, cnt + 1):
+            res.append(StaticIpv6Addr(self._nic, i))
+
+        return res
-- 
cgit v1.2.3


From 415c45a2b9b66603e672e8ea54cee8f40a19abd1 Mon Sep 17 00:00:00 2001
From: Sankar Tanguturi <stanguturi@stanguturi-rhel>
Date: Tue, 19 Jan 2016 18:24:54 -0800
Subject:   Fixed all the review comments from Daniel.   Added a new file i.e.
 nic_base.py which will be used as a base class for all NIC-related
 configuration.   Modified some code in nic.py.

---
 cloudinit/sources/helpers/vmware/imc/boot_proto.py |   2 +-
 cloudinit/sources/helpers/vmware/imc/config.py     |   6 +-
 .../sources/helpers/vmware/imc/config_file.py      |  40 ++----
 cloudinit/sources/helpers/vmware/imc/ipv4_mode.py  |   2 +-
 cloudinit/sources/helpers/vmware/imc/nic.py        | 118 +++++++---------
 cloudinit/sources/helpers/vmware/imc/nic_base.py   | 154 +++++++++++++++++++++
 6 files changed, 222 insertions(+), 100 deletions(-)
 create mode 100644 cloudinit/sources/helpers/vmware/imc/nic_base.py

(limited to 'cloudinit')

diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
index abfffd75..faba5887 100644
--- a/cloudinit/sources/helpers/vmware/imc/boot_proto.py
+++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
@@ -18,7 +18,7 @@
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 
-class BootProto:
+class BootProtoEnum:
     """Specifies the NIC Boot Settings."""
 
     DHCP = 'dhcp'
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
index 7eee47a5..aebc12a0 100644
--- a/cloudinit/sources/helpers/vmware/imc/config.py
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -66,7 +66,8 @@ class Config:
     def name_servers(self):
         """Return the list of DNS servers."""
         res = []
-        for i in range(1, self._configFile.get_count(Config.DNS) + 1):
+        cnt = self._configFile.get_count_with_prefix(Config.DNS)
+        for i in range(1, cnt + 1):
             key = Config.DNS + str(i)
             res.append(self._configFile[key])
 
@@ -76,7 +77,8 @@ class Config:
     def dns_suffixes(self):
         """Return the list of DNS Suffixes."""
         res = []
-        for i in range(1, self._configFile.get_count(Config.SUFFIX) + 1):
+        cnt = self._configFile.get_count_with_prefix(Config.SUFFIX)
+        for i in range(1, cnt + 1):
             key = Config.SUFFIX + str(i)
             res.append(self._configFile[key])
 
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index e08a2a9a..7c47d14c 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -32,7 +32,8 @@ logger = logging.getLogger(__name__)
 class ConfigFile(ConfigSource, dict):
     """ConfigFile module to load the content from a specified source."""
 
-    def __init__(self):
+    def __init__(self, filename):
+        self._loadConfigFile(filename)
         pass
 
     def _insertKey(self, key, val):
@@ -48,9 +49,9 @@ class ConfigFile(ConfigSource, dict):
         val = val.strip()
 
         if key.startswith('-') or '|-' in key:
-            canLog = 0
+            canLog = False
         else:
-            canLog = 1
+            canLog = True
 
         # "sensitive" settings shall not be logged
         if canLog:
@@ -64,7 +65,7 @@ class ConfigFile(ConfigSource, dict):
         """Return the number of properties present."""
         return len(self)
 
-    def loadConfigFile(self, filename):
+    def _loadConfigFile(self, filename):
         """
         Parses properties from the specified config file.
 
@@ -87,22 +88,9 @@ class ConfigFile(ConfigSource, dict):
             logger.debug("FOUND CATEGORY = '%s'" % category)
 
             for (key, value) in config.items(category):
-                # "sensitive" settings shall not be logged
-                if key.startswith('-'):
-                    canLog = 0
-                else:
-                    canLog = 1
-
-                if canLog:
-                    logger.debug("Processing key, value: '%s':'%s'" %
-                                 (key, value))
-                else:
-                    logger.debug("Processing key, value : "
-                                 "'*********************'")
-
                 self._insertKey(category + '|' + key, value)
 
-    def keep_current_value(self, key):
+    def should_keep_current_value(self, key):
         """
         Determines whether a value for a property must be kept.
 
@@ -114,9 +102,9 @@ class ConfigFile(ConfigSource, dict):
         """
         # helps to distinguish from "empty" value which is used to indicate
         # "removal"
-        return not key in self
+        return key not in self
 
-    def remove_current_value(self, key):
+    def should_remove_current_value(self, key):
         """
         Determines whether a value for the property must be removed.
 
@@ -135,17 +123,11 @@ class ConfigFile(ConfigSource, dict):
         else:
             return False
 
-    def get_count(self, prefix):
+    def get_count_with_prefix(self, prefix):
         """
-        Return the total number of keys that start with the
-        specified prefix.
+        Return the total count of keys that start with the specified prefix.
 
         Keyword arguments:
         prefix -- prefix of the key
         """
-        res = 0
-        for key in self.keys():
-            if key.startswith(prefix):
-                res += 1
-
-        return res
+        return len([key for key in self if key.startswith(prefix)])
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
index 28544e4f..33f88726 100644
--- a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
+++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
@@ -18,7 +18,7 @@
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
 
-class Ipv4Mode:
+class Ipv4ModeEnum:
     """
     The IPv4 configuration mode which directly represents the user's goal.
 
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
index bb45a9e6..a7594874 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -17,10 +17,11 @@
 #    You should have received a copy of the GNU General Public License
 #    along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
-from .boot_proto import BootProto
+from .boot_proto import BootProtoEnum
+from .nic_base import NicBase, StaticIpv4Base, StaticIpv6Base
 
 
-class Nic:
+class Nic(NicBase):
     """
     Holds the information about each NIC specified
     in the customization specification file
@@ -31,10 +32,10 @@ class Nic:
         self._configFile = configFile
 
     def _get(self, what):
-        return self._configFile.get(self.name + what, None)
+        return self._configFile.get(self.name + '|' + what, None)
 
-    def _get_count(self, prefix):
-        return self._configFile.get_count(self.name + prefix)
+    def _get_count_with_prefix(self, prefix):
+        return self._configFile.get_count_with_prefix(self.name + prefix)
 
     @property
     def name(self):
@@ -42,41 +43,52 @@ class Nic:
 
     @property
     def mac(self):
-        return self._get('|MACADDR').lower()
+        return self._get('MACADDR').lower()
 
     @property
-    def bootProto(self):
-        return self._get('|BOOTPROTO').lower()
+    def primary(self):
+        value = self._get('PRIMARY').lower()
+        return value == 'yes' or value == 'true'
 
     @property
-    def ipv4(self):
-        """
-        Retrieves the DHCP or Static IPv6 configuration
-        based on the BOOTPROTO property associated with the NIC
-        """
-        if self.bootProto == BootProto.STATIC:
-            return StaticIpv4Conf(self)
+    def onboot(self):
+        value = self._get('ONBOOT').lower()
+        return value == 'yes' or value == 'true'
 
-        return DhcpIpv4Conf(self)
+    @property
+    def bootProto(self):
+        return self._get('BOOTPROTO').lower()
 
     @property
-    def ipv6(self):
-        cnt = self._get_count("|IPv6ADDR|")
+    def ipv4_mode(self):
+        return self._get('IPv4_MODE').lower()
 
-        if cnt != 0:
-            return StaticIpv6Conf(self)
+    @property
+    def staticIpv4(self):
+        """
+        Checks the BOOTPROTO property and returns StaticIPv4Addr
+        configuration object if STATIC configuration is set.
+        """
+        if self.bootProto == BootProtoEnum.STATIC:
+            return [StaticIpv4Addr(self)]
+        else:
+            return None
 
-        return DhcpIpv6Conf(self)
+    @property
+    def staticIpv6(self):
+        cnt = self._get_count_with_prefix('|IPv6ADDR|')
 
+        if not cnt:
+            return None
 
-class DhcpIpv4Conf:
-    """DHCP Configuration Setting."""
+        result = []
+        for index in range(1, cnt + 1):
+            result.append(StaticIpv6Addr(self, index))
 
-    def __init__(self, nic):
-        self._nic = nic
+        return result
 
 
-class StaticIpv4Addr:
+class StaticIpv4Addr(StaticIpv4Base):
     """Static IPV4  Setting."""
 
     def __init__(self, nic):
@@ -84,34 +96,22 @@ class StaticIpv4Addr:
 
     @property
     def ip(self):
-        return self._nic._get('|IPADDR')
+        return self._nic._get('IPADDR')
 
     @property
     def netmask(self):
-        return self._nic._get('|NETMASK')
+        return self._nic._get('NETMASK')
 
     @property
-    def gateway(self):
-        return self._nic._get('|GATEWAY')
+    def gateways(self):
+        value = self._nic._get('GATEWAY')
+        if value:
+            return [x.strip() for x in value.split(',')]
+        else:
+            return None
 
 
-class StaticIpv4Conf(DhcpIpv4Conf):
-    """Static IPV4 Configuration."""
-
-    @property
-    def addrs(self):
-        """Return the list of associated IPv4 addresses."""
-        return [StaticIpv4Addr(self._nic)]
-
-
-class DhcpIpv6Conf:
-    """DHCP IPV6 Configuration."""
-
-    def __init__(self, nic):
-        self._nic = nic
-
-
-class StaticIpv6Addr:
+class StaticIpv6Addr(StaticIpv6Base):
     """Static IPV6 Address."""
 
     def __init__(self, nic, index):
@@ -120,28 +120,12 @@ class StaticIpv6Addr:
 
     @property
     def ip(self):
-        return self._nic._get("|IPv6ADDR|" + str(self._index))
+        return self._nic._get('IPv6ADDR|' + str(self._index))
 
     @property
-    def prefix(self):
-        return self._nic._get("|IPv6NETMASK|" + str(self._index))
+    def netmask(self):
+        return self._nic._get('IPv6NETMASK|' + str(self._index))
 
     @property
     def gateway(self):
-        return self._nic._get("|IPv6GATEWAY|" + str(self._index))
-
-
-class StaticIpv6Conf(DhcpIpv6Conf):
-    """Static IPV6 Configuration."""
-
-    @property
-    def addrs(self):
-        """Return the list Associated IPV6 addresses."""
-        cnt = self._nic._get_count("|IPv6ADDR|")
-
-        res = []
-
-        for i in range(1, cnt + 1):
-            res.append(StaticIpv6Addr(self._nic, i))
-
-        return res
+        return self._nic._get('IPv6GATEWAY|' + str(self._index))
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
new file mode 100644
index 00000000..030ba311
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py
@@ -0,0 +1,154 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2015 Canonical Ltd.
+#    Copyright (C) 2015 VMware Inc.
+#
+#    Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+
+class NicBase:
+    """
+    Define what is expected of each nic.
+    The following properties should be provided in an implementation class.
+    """
+
+    @property
+    def mac(self):
+        """
+        Retrieves the mac address of the nic
+        @return (str) : the MACADDR setting
+        """
+        raise NotImplementedError('MACADDR')
+
+    @property
+    def primary(self):
+        """
+        Retrieves whether the nic is the primary nic
+        Indicates whether NIC will be used to define the default gateway.
+        If none of the NICs is configured to be primary, default gateway won't
+        be set.
+        @return (bool): the PRIMARY setting
+        """
+        raise NotImplementedError('PRIMARY')
+
+    @property
+    def onboot(self):
+        """
+        Retrieves whether the nic should be up at the boot time
+        @return (bool) : the ONBOOT setting
+        """
+        raise NotImplementedError('ONBOOT')
+
+    @property
+    def bootProto(self):
+        """
+        Retrieves the boot protocol of the nic
+        @return (str): the BOOTPROTO setting, valid values: dhcp and static.
+        """
+        raise NotImplementedError('BOOTPROTO')
+
+    @property
+    def ipv4_mode(self):
+        """
+        Retrieves the IPv4_MODE
+        @return (str): the IPv4_MODE setting, valid values:
+        backwards_compatible, static, dhcp, disabled, as_is
+        """
+        raise NotImplementedError('IPv4_MODE')
+
+    @property
+    def staticIpv4(self):
+        """
+        Retrieves the static IPv4 configuration of the nic
+        @return (StaticIpv4Base list): the static ipv4 setting
+        """
+        raise NotImplementedError('Static IPv4')
+
+    @property
+    def staticIpv6(self):
+        """
+        Retrieves the IPv6 configuration of the nic
+        @return (StaticIpv6Base list): the static ipv6 setting
+        """
+        raise NotImplementedError('Static Ipv6')
+
+    def validate(self):
+        """
+        Validate the object
+        For example, the staticIpv4 property is required and should not be
+        empty when ipv4Mode is STATIC
+        """
+        raise NotImplementedError('Check constraints on properties')
+
+
+class StaticIpv4Base:
+    """
+    Define what is expected of a static IPv4 setting
+    The following properties should be provided in an implementation class.
+    """
+
+    @property
+    def ip(self):
+        """
+        Retrieves the Ipv4 address
+        @return (str): the IPADDR setting
+        """
+        raise NotImplementedError('Ipv4 Address')
+
+    @property
+    def netmask(self):
+        """
+        Retrieves the Ipv4 NETMASK setting
+        @return (str): the NETMASK setting
+        """
+        raise NotImplementedError('Ipv4 NETMASK')
+
+    @property
+    def gateways(self):
+        """
+        Retrieves the gateways on this Ipv4 subnet
+        @return (str list): the GATEWAY setting
+        """
+        raise NotImplementedError('Ipv4 GATEWAY')
+
+
+class StaticIpv6Base:
+    """Define what are expected of a static IPv6 setting
+    The following properties should be provided in an implementation class.
+    """
+
+    @property
+    def ip(self):
+        """
+        Retrieves the Ipv6 address
+        @return (str): the IPv6ADDR setting
+        """
+        raise NotImplementedError('Ipv6 Address')
+
+    @property
+    def netmask(self):
+        """
+        Retrieves the Ipv6 NETMASK setting
+        @return (str): the IPv6NETMASK setting
+        """
+        raise NotImplementedError('Ipv6 NETMASK')
+
+    @property
+    def gateway(self):
+        """
+        Retrieves the Ipv6 GATEWAY setting
+        @return (str): the IPv6GATEWAY setting
+        """
+        raise NotImplementedError('Ipv6 GATEWAY')
-- 
cgit v1.2.3


From ce13a13190356a598cb8d3aacbf87e91bc9eb4f1 Mon Sep 17 00:00:00 2001
From: Martin Pitt <martin.pitt@ubuntu.com>
Date: Thu, 28 Jan 2016 14:09:24 +0100
Subject: Use systemd-detect-virt to detect a container.

running-in-container is an Ubuntu-ism and going away.

LP: #1539016
---
 cloudinit/util.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 83c2c0d2..45d49e66 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -76,7 +76,9 @@ FALSE_STRINGS = ('off', '0', 'no', 'false')
 
 
 # Helper utils to see if running in a container
-CONTAINER_TESTS = ('running-in-container', 'lxc-is-container')
+CONTAINER_TESTS = (['systemd-detect-virt', '--quiet', '--container'],
+                   ['running-in-container'],
+                   ['lxc-is-container'])
 
 
 def decode_binary(blob, encoding='utf-8'):
@@ -1749,7 +1751,7 @@ def is_container():
         try:
             # try to run a helper program. if it returns true/zero
             # then we're inside a container. otherwise, no
-            subp([helper])
+            subp(helper)
             return True
         except (IOError, OSError):
             pass
-- 
cgit v1.2.3


From ee40614b0a34a110265493c176c64db823aa34b3 Mon Sep 17 00:00:00 2001
From: Wesley Wiedenmeier <wesley.wiedenmeier@gmail.com>
Date: Wed, 3 Feb 2016 22:21:40 -0600
Subject: lxd: add support for setting up lxd using 'lxd init'

If lxd key is present in cfg, then run 'lxd init' with values from the 'init'
entry in lxd configuration as flags.
---
 ChangeLog                                        |  1 +
 cloudinit/config/cc_lxd.py                       | 50 +++++++++++++++++++
 config/cloud.cfg                                 |  1 +
 tests/unittests/test_handler/test_handler_lxd.py | 62 ++++++++++++++++++++++++
 4 files changed, 114 insertions(+)
 create mode 100644 cloudinit/config/cc_lxd.py
 create mode 100644 tests/unittests/test_handler/test_handler_lxd.py

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index 0ba16492..9fbc920d 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -71,6 +71,7 @@
  - Azure: get instance id from dmi instead of SharedConfig (LP: #1506187)
  - systemd/power_state: fix power_state to work even if cloud-final
    exited non-zero (LP: #1449318)
+ - lxd: add support for setting up lxd using 'lxd init'
 0.7.6:
  - open 0.7.6
  - Enable vendordata on CloudSigma datasource (LP: #1303986)
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
new file mode 100644
index 00000000..0db8356b
--- /dev/null
+++ b/cloudinit/config/cc_lxd.py
@@ -0,0 +1,50 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2016 Canonical Ltd.
+#
+#    Author: Wesley Wiedenmeier <wesley.wiedenmeier@canonical.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+"""
+This module initializes lxd using 'lxd init'
+
+Example config:
+  #cloud-config
+  lxd:
+    init:
+      network_address: <ip addr>
+      network_port: <port>
+      storage_backend: <zfs/dir>
+      storage_create_device: <dev>
+      storage_create_loop: <size>
+      storage_pool: <name>
+      trust_password: <password>
+"""
+
+from cloudinit import util
+
+
+def handle(name, cfg, cloud, log, args):
+    if not cfg.get('lxd') and cfg['lxd'].get('init'):
+        log.debug("Skipping module named %s, not present or disabled by cfg")
+        return
+    lxd_conf = cfg['lxd']['init']
+    keys = ('network_address', 'network_port', 'storage_backend',
+            'storage_create_device', 'storage_create_loop', 'storage_pool',
+            'trust_password')
+    cmd = ['lxd', 'init', '--auto']
+    for k in keys:
+        if lxd_conf.get(k):
+            cmd.extend(["--%s" % k.replace('_', '-'), lxd_conf[k]])
+    util.subp(cmd)
diff --git a/config/cloud.cfg b/config/cloud.cfg
index 74794ab0..795df19f 100644
--- a/config/cloud.cfg
+++ b/config/cloud.cfg
@@ -56,6 +56,7 @@ cloud_config_modules:
  - fan
  - landscape
  - timezone
+ - lxd
  - puppet
  - chef
  - salt-minion
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
new file mode 100644
index 00000000..89863d52
--- /dev/null
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -0,0 +1,62 @@
+from cloudinit.config import cc_lxd
+from cloudinit import (util, distros, helpers, cloud)
+from cloudinit.sources import DataSourceNoCloud
+from .. import helpers as t_help
+
+import logging
+
+LOG = logging.getLogger(__name__)
+
+
+class TestLxd(t_help.TestCase):
+    def setUp(self):
+        super(TestLxd, self).setUp()
+        self.unapply = []
+        apply_patches([(util, 'subp', self._mock_subp)])
+        self.subp_called = []
+
+    def tearDown(self):
+        apply_patches([i for i in reversed(self.unapply)])
+
+    def _mock_subp(self, *args, **kwargs):
+        if 'args' not in kwargs:
+            kwargs['args'] = args[0]
+        self.subp_called.append(kwargs)
+        return
+
+    def _get_cloud(self, distro):
+        cls = distros.fetch(distro)
+        paths = helpers.Paths({})
+        d = cls(distro, {}, paths)
+        ds = DataSourceNoCloud.DataSourceNoCloud({}, d, paths)
+        cc = cloud.Cloud(ds, paths, {}, d, None)
+        return cc
+
+    def test_lxd_init(self):
+        cfg = {
+            'lxd': {
+                'init': {
+                    'network_address': '0.0.0.0',
+                    'storage_backend': 'zfs',
+                    'storage_pool': 'poolname',
+                }
+            }
+        }
+        cc = self._get_cloud('ubuntu')
+        cc_lxd.handle('cc_lxd', cfg, cc, LOG, [])
+
+        self.assertEqual(
+                self.subp_called[0].get('args'),
+                ['lxd', 'init', '--auto', '--network-address', '0.0.0.0',
+                 '--storage-backend', 'zfs', '--storage-pool', 'poolname'])
+
+
+def apply_patches(patches):
+    ret = []
+    for (ref, name, replace) in patches:
+        if replace is None:
+            continue
+        orig = getattr(ref, name)
+        setattr(ref, name, replace)
+        ret.append((ref, name, orig))
+    return ret
-- 
cgit v1.2.3


From 75ba44d2730b89f13b2069961ea8de63f65ea780 Mon Sep 17 00:00:00 2001
From: Robert Jennings <robert.jennings@canonical.com>
Date: Thu, 4 Feb 2016 15:52:08 -0600
Subject: SmartOS: Add support for Joyent LX-Brand Zones (LP: #1540965)

LX-brand zones on Joyent's SmartOS use a different metadata source
(socket file) than the KVM-based SmartOS virtualization (serial port).
This patch adds support for recognizing the different flavors of
virtualization on SmartOS and setting up a metadata source file object.
After the file object is created, the rest of the code for the datasource

LP: #1540965
---
 cloudinit/sources/DataSourceSmartOS.py          | 257 ++++++++++++++----------
 doc/examples/cloud-config-datasources.txt       |   7 +
 tests/unittests/test_datasource/test_smartos.py |  85 +++++---
 3 files changed, 216 insertions(+), 133 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index c9b497df..7453379a 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -20,10 +20,13 @@
 #    Datasource for provisioning on SmartOS. This works on Joyent
 #        and public/private Clouds using SmartOS.
 #
-#    SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests.
+#    SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
 #        The meta-data is transmitted via key/value pairs made by
 #        requests on the console. For example, to get the hostname, you
 #        would send "GET hostname" on /dev/ttyS1.
+#        For Linux Guests running in LX-Brand Zones on SmartOS hosts
+#        a socket (/native/.zonecontrol/metadata.sock) is used instead
+#        of a serial console.
 #
 #   Certain behavior is defined by the DataDictionary
 #       http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
@@ -34,6 +37,8 @@ import contextlib
 import os
 import random
 import re
+import socket
+import stat
 
 import serial
 
@@ -46,6 +51,7 @@ LOG = logging.getLogger(__name__)
 
 SMARTOS_ATTRIB_MAP = {
     # Cloud-init Key : (SmartOS Key, Strip line endings)
+    'instance-id': ('sdc:uuid', True),
     'local-hostname': ('hostname', True),
     'public-keys': ('root_authorized_keys', True),
     'user-script': ('user-script', False),
@@ -76,6 +82,7 @@ DS_CFG_PATH = ['datasource', DS_NAME]
 #
 BUILTIN_DS_CONFIG = {
     'serial_device': '/dev/ttyS1',
+    'metadata_sockfile': '/native/.zonecontrol/metadata.sock',
     'seed_timeout': 60,
     'no_base64_decode': ['root_authorized_keys',
                          'motd_sys_info',
@@ -83,6 +90,7 @@ BUILTIN_DS_CONFIG = {
                          'user-data',
                          'user-script',
                          'sdc:datacenter_name',
+                         'sdc:uuid',
                         ],
     'base64_keys': [],
     'base64_all': False,
@@ -150,17 +158,27 @@ class DataSourceSmartOS(sources.DataSource):
     def __init__(self, sys_cfg, distro, paths):
         sources.DataSource.__init__(self, sys_cfg, distro, paths)
         self.is_smartdc = None
-
         self.ds_cfg = util.mergemanydict([
             self.ds_cfg,
             util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
             BUILTIN_DS_CONFIG])
 
         self.metadata = {}
-        self.cfg = BUILTIN_CLOUD_CONFIG
 
-        self.seed = self.ds_cfg.get("serial_device")
-        self.seed_timeout = self.ds_cfg.get("serial_timeout")
+        # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
+        # report 'BrandZ virtual linux' as the kernel version
+        if os.uname()[3].lower() == 'brandz virtual linux':
+            LOG.debug("Host is SmartOS, guest in Zone")
+            self.is_smartdc = True
+            self.smartos_type = 'lx-brand'
+            self.cfg = {}
+            self.seed = self.ds_cfg.get("metadata_sockfile")
+        else:
+            self.is_smartdc = True
+            self.smartos_type = 'kvm'
+            self.seed = self.ds_cfg.get("serial_device")
+            self.cfg = BUILTIN_CLOUD_CONFIG
+            self.seed_timeout = self.ds_cfg.get("serial_timeout")
         self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
         self.b64_keys = self.ds_cfg.get('base64_keys')
         self.b64_all = self.ds_cfg.get('base64_all')
@@ -170,12 +188,49 @@ class DataSourceSmartOS(sources.DataSource):
         root = sources.DataSource.__str__(self)
         return "%s [seed=%s]" % (root, self.seed)
 
+    def _get_seed_file_object(self):
+        if not self.seed:
+            raise AttributeError("seed device is not set")
+
+        if self.smartos_type == 'lx-brand':
+            if not stat.S_ISSOCK(os.stat(self.seed).st_mode):
+                LOG.debug("Seed %s is not a socket", self.seed)
+                return None
+            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+            sock.connect(self.seed)
+            return sock.makefile('rwb')
+        else:
+            if not stat.S_ISCHR(os.stat(self.seed).st_mode):
+                LOG.debug("Seed %s is not a character device")
+                return None
+            ser = serial.Serial(self.seed, timeout=self.seed_timeout)
+            if not ser.isOpen():
+                raise SystemError("Unable to open %s" % self.seed)
+            return ser
+        return None
+
+    def _set_provisioned(self):
+        '''Mark the instance provisioning state as successful.
+
+        When run in a zone, the host OS will look for /var/svc/provisioning
+        to be renamed as /var/svc/provision_success.   This should be done
+        after meta-data is successfully retrieved and from this point
+        the host considers the provision of the zone to be a success and
+        keeps the zone running.
+        '''
+
+        LOG.debug('Instance provisioning state set as successful')
+        svc_path = '/var/svc'
+        if os.path.exists('/'.join([svc_path, 'provisioning'])):
+            os.rename('/'.join([svc_path, 'provisioning']),
+                      '/'.join([svc_path, 'provision_success']))
+
     def get_data(self):
         md = {}
         ud = ""
 
         if not device_exists(self.seed):
-            LOG.debug("No serial device '%s' found for SmartOS datasource",
+            LOG.debug("No metadata device '%s' found for SmartOS datasource",
                       self.seed)
             return False
 
@@ -185,29 +240,36 @@ class DataSourceSmartOS(sources.DataSource):
             LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)")
             return False
 
-        dmi_info = dmi_data()
-        if dmi_info is False:
-            LOG.debug("No dmidata utility found")
-            return False
-
-        system_uuid, system_type = tuple(dmi_info)
-        if 'smartdc' not in system_type.lower():
-            LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
+        # SDC KVM instances will provide dmi data, LX-brand does not
+        if self.smartos_type == 'kvm':
+            dmi_info = dmi_data()
+            if dmi_info is False:
+                LOG.debug("No dmidata utility found")
+                return False
+
+            system_type = dmi_info
+            if 'smartdc' not in system_type.lower():
+                LOG.debug("Host is not on SmartOS. system_type=%s",
+                          system_type)
+                return False
+            LOG.debug("Host is SmartOS, guest in KVM")
+
+        seed_obj = self._get_seed_file_object()
+        if seed_obj is None:
+            LOG.debug('Seed file object not found.')
             return False
-        self.is_smartdc = True
-        md['instance-id'] = system_uuid
+        with contextlib.closing(seed_obj) as seed:
+            b64_keys = self.query('base64_keys', seed, strip=True, b64=False)
+            if b64_keys is not None:
+                self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
 
-        b64_keys = self.query('base64_keys', strip=True, b64=False)
-        if b64_keys is not None:
-            self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
+            b64_all = self.query('base64_all', seed, strip=True, b64=False)
+            if b64_all is not None:
+                self.b64_all = util.is_true(b64_all)
 
-        b64_all = self.query('base64_all', strip=True, b64=False)
-        if b64_all is not None:
-            self.b64_all = util.is_true(b64_all)
-
-        for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
-            smartos_noun, strip = attribute
-            md[ci_noun] = self.query(smartos_noun, strip=strip)
+            for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
+                smartos_noun, strip = attribute
+                md[ci_noun] = self.query(smartos_noun, seed, strip=strip)
 
         # @datadictionary: This key may contain a program that is written
         # to a file in the filesystem of the guest on each boot and then
@@ -240,7 +302,7 @@ class DataSourceSmartOS(sources.DataSource):
 
         # Handle the cloud-init regular meta
         if not md['local-hostname']:
-            md['local-hostname'] = system_uuid
+            md['local-hostname'] = md['instance-id']
 
         ud = None
         if md['user-data']:
@@ -257,6 +319,8 @@ class DataSourceSmartOS(sources.DataSource):
         self.metadata = util.mergemanydict([md, self.metadata])
         self.userdata_raw = ud
         self.vendordata_raw = md['vendor-data']
+
+        self._set_provisioned()
         return True
 
     def device_name_to_device(self, name):
@@ -268,40 +332,64 @@ class DataSourceSmartOS(sources.DataSource):
     def get_instance_id(self):
         return self.metadata['instance-id']
 
-    def query(self, noun, strip=False, default=None, b64=None):
+    def query(self, noun, seed_file, strip=False, default=None, b64=None):
         if b64 is None:
             if noun in self.smartos_no_base64:
                 b64 = False
             elif self.b64_all or noun in self.b64_keys:
                 b64 = True
 
-        return query_data(noun=noun, strip=strip, seed_device=self.seed,
-                          seed_timeout=self.seed_timeout, default=default,
-                          b64=b64)
+        return self._query_data(noun, seed_file, strip=strip,
+                                default=default, b64=b64)
 
+    def _query_data(self, noun, seed_file, strip=False,
+                    default=None, b64=None):
+        """Makes a request via "GET <NOUN>"
 
-def device_exists(device):
-    """Symplistic method to determine if the device exists or not"""
-    return os.path.exists(device)
+           In the response, the first line is the status, while subsequent
+           lines are the value. A blank line with a "." is used to
+           indicate end of response.
 
+           If the response is expected to be base64 encoded, then set
+           b64 to true. Unfortunately, there is no way to know if
+           something is 100% encoded, so this method relies on being told
+           if the data is base64 or not.
+        """
 
-def get_serial(seed_device, seed_timeout):
-    """This is replaced in unit testing, allowing us to replace
-        serial.Serial with a mocked class.
+        if not noun:
+            return False
 
-        The timeout value of 60 seconds should never be hit. The value
-        is taken from SmartOS own provisioning tools. Since we are reading
-        each line individually up until the single ".", the transfer is
-        usually very fast (i.e. microseconds) to get the response.
-    """
-    if not seed_device:
-        raise AttributeError("seed_device value is not set")
+        response = JoyentMetadataClient(seed_file).get_metadata(noun)
+
+        if response is None:
+            return default
+
+        if b64 is None:
+            b64 = self._query_data('b64-%s' % noun, seed_file, b64=False,
+                                   default=False, strip=True)
+            b64 = util.is_true(b64)
+
+        resp = None
+        if b64 or strip:
+            resp = "".join(response).rstrip()
+        else:
+            resp = "".join(response)
 
-    ser = serial.Serial(seed_device, timeout=seed_timeout)
-    if not ser.isOpen():
-        raise SystemError("Unable to open %s" % seed_device)
+        if b64:
+            try:
+                return util.b64d(resp)
+            # Bogus input produces different errors in Python 2 and 3;
+            # catch both.
+            except (TypeError, binascii.Error):
+                LOG.warn("Failed base64 decoding key '%s'", noun)
+                return resp
 
-    return ser
+        return resp
+
+
+def device_exists(device):
+    """Symplistic method to determine if the device exists or not"""
+    return os.path.exists(device)
 
 
 class JoyentMetadataFetchException(Exception):
@@ -320,8 +408,8 @@ class JoyentMetadataClient(object):
         r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
         r'( (?P<payload>.+))?)')
 
-    def __init__(self, serial):
-        self.serial = serial
+    def __init__(self, metasource):
+        self.metasource = metasource
 
     def _checksum(self, body):
         return '{0:08x}'.format(
@@ -356,67 +444,30 @@ class JoyentMetadataClient(object):
                                             util.b64e(metadata_key))
         msg = 'V2 {0} {1} {2}\n'.format(
             len(message_body), self._checksum(message_body), message_body)
-        LOG.debug('Writing "%s" to serial port.', msg)
-        self.serial.write(msg.encode('ascii'))
-        response = self.serial.readline().decode('ascii')
-        LOG.debug('Read "%s" from serial port.', response)
-        return self._get_value_from_frame(request_id, response)
-
-
-def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
-               b64=None):
-    """Makes a request to via the serial console via "GET <NOUN>"
-
-        In the response, the first line is the status, while subsequent lines
-        are is the value. A blank line with a "." is used to indicate end of
-        response.
-
-        If the response is expected to be base64 encoded, then set b64encoded
-        to true. Unfortantely, there is no way to know if something is 100%
-        encoded, so this method relies on being told if the data is base64 or
-        not.
-    """
-    if not noun:
-        return False
-
-    with contextlib.closing(get_serial(seed_device, seed_timeout)) as ser:
-        client = JoyentMetadataClient(ser)
-        response = client.get_metadata(noun)
-
-    if response is None:
-        return default
-
-    if b64 is None:
-        b64 = query_data('b64-%s' % noun, seed_device=seed_device,
-                         seed_timeout=seed_timeout, b64=False,
-                         default=False, strip=True)
-        b64 = util.is_true(b64)
-
-    resp = None
-    if b64 or strip:
-        resp = "".join(response).rstrip()
-    else:
-        resp = "".join(response)
-
-    if b64:
-        try:
-            return util.b64d(resp)
-        # Bogus input produces different errors in Python 2 and 3; catch both.
-        except (TypeError, binascii.Error):
-            LOG.warn("Failed base64 decoding key '%s'", noun)
-            return resp
+        LOG.debug('Writing "%s" to metadata transport.', msg)
+        self.metasource.write(msg.encode('ascii'))
+        self.metasource.flush()
+
+        response = bytearray()
+        response.extend(self.metasource.read(1))
+        while response[-1:] !=  b'\n':
+            response.extend(self.metasource.read(1))
+        response = response.rstrip().decode('ascii')
+        LOG.debug('Read "%s" from metadata transport.', response)
+
+        if 'SUCCESS' not in response:
+            return None
 
-    return resp
+        return self._get_value_from_frame(request_id, response)
 
 
 def dmi_data():
-    sys_uuid = util.read_dmi_data("system-uuid")
     sys_type = util.read_dmi_data("system-product-name")
 
-    if not sys_uuid or not sys_type:
+    if not sys_type:
         return None
 
-    return (sys_uuid.lower(), sys_type)
+    return sys_type
 
 
 def write_boot_content(content, content_f, link=None, shebang=False,
diff --git a/doc/examples/cloud-config-datasources.txt b/doc/examples/cloud-config-datasources.txt
index 3bde4aac..2651c027 100644
--- a/doc/examples/cloud-config-datasources.txt
+++ b/doc/examples/cloud-config-datasources.txt
@@ -51,12 +51,19 @@ datasource:
       policy: on # [can be 'on', 'off' or 'force']
 
   SmartOS:
+    # For KVM guests:
     # Smart OS datasource works over a serial console interacting with
     # a server on the other end. By default, the second serial console is the
     # device. SmartOS also uses a serial timeout of 60 seconds.
     serial_device: /dev/ttyS1
     serial_timeout: 60
 
+    # For LX-Brand Zones guests:
+    # Smart OS datasource works over a socket interacting with
+    # the host on the other end. By default, the socket file is in
+    # the native .zonecontrol directory.
+    metadata_sockfile: /native/.zonecontrol/metadata.sock
+
     # a list of keys that will not be base64 decoded even if base64_all
     no_base64_decode: ['root_authorized_keys', 'motd_sys_info',
                        'iptables_disable']
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index adee9019..1235436d 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -31,6 +31,7 @@ import shutil
 import stat
 import tempfile
 import uuid
+import unittest
 from binascii import crc32
 
 import serial
@@ -56,12 +57,13 @@ MOCK_RETURNS = {
     'cloud-init:user-data': '\n'.join(['#!/bin/sh', '/bin/true', '']),
     'sdc:datacenter_name': 'somewhere2',
     'sdc:operator-script': '\n'.join(['bin/true', '']),
+    'sdc:uuid': str(uuid.uuid4()),
     'sdc:vendor-data': '\n'.join(['VENDOR_DATA', '']),
     'user-data': '\n'.join(['something', '']),
     'user-script': '\n'.join(['/bin/true', '']),
 }
 
-DMI_DATA_RETURN = (str(uuid.uuid4()), 'smartdc')
+DMI_DATA_RETURN = 'smartdc'
 
 
 def get_mock_client(mockdata):
@@ -111,7 +113,8 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         ret = apply_patches(patches)
         self.unapply += ret
 
-    def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None):
+    def _get_ds(self, sys_cfg=None, ds_cfg=None, mockdata=None, dmi_data=None,
+                is_lxbrand=False):
         mod = DataSourceSmartOS
 
         if mockdata is None:
@@ -124,9 +127,13 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
             return dmi_data
 
         def _os_uname():
-            # LP: #1243287. tests assume this runs, but running test on
-            # arm would cause them all to fail.
-            return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', 'x86_64')
+            if not is_lxbrand:
+                # LP: #1243287. tests assume this runs, but running test on
+                # arm would cause them all to fail.
+                return ('LINUX', 'NODENAME', 'RELEASE', 'VERSION', 'x86_64')
+            else:
+                return ('LINUX', 'NODENAME', 'RELEASE', 'BRANDZ VIRTUAL LINUX',
+                        'X86_64')
 
         if sys_cfg is None:
             sys_cfg = {}
@@ -136,7 +143,6 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
             sys_cfg['datasource']['SmartOS'] = ds_cfg
 
         self.apply_patches([(mod, 'LEGACY_USER_D', self.legacy_user_d)])
-        self.apply_patches([(mod, 'get_serial', mock.MagicMock())])
         self.apply_patches([
             (mod, 'JoyentMetadataClient', get_mock_client(mockdata))])
         self.apply_patches([(mod, 'dmi_data', _dmi_data)])
@@ -144,6 +150,7 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         self.apply_patches([(mod, 'device_exists', lambda d: True)])
         dsrc = mod.DataSourceSmartOS(sys_cfg, distro=None,
                                      paths=self.paths)
+        self.apply_patches([(dsrc, '_get_seed_file_object', mock.MagicMock())])
         return dsrc
 
     def test_seed(self):
@@ -151,14 +158,29 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         dsrc = self._get_ds()
         ret = dsrc.get_data()
         self.assertTrue(ret)
+        self.assertEquals('kvm', dsrc.smartos_type)
         self.assertEquals('/dev/ttyS1', dsrc.seed)
 
+    def test_seed_lxbrand(self):
+        # default seed should be /dev/ttyS1
+        dsrc = self._get_ds(is_lxbrand=True)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertEquals('lx-brand', dsrc.smartos_type)
+        self.assertEquals('/native/.zonecontrol/metadata.sock', dsrc.seed)
+
     def test_issmartdc(self):
         dsrc = self._get_ds()
         ret = dsrc.get_data()
         self.assertTrue(ret)
         self.assertTrue(dsrc.is_smartdc)
 
+    def test_issmartdc_lxbrand(self):
+        dsrc = self._get_ds(is_lxbrand=True)
+        ret = dsrc.get_data()
+        self.assertTrue(ret)
+        self.assertTrue(dsrc.is_smartdc)
+
     def test_no_base64(self):
         ds_cfg = {'no_base64_decode': ['test_var1'], 'all_base': True}
         dsrc = self._get_ds(ds_cfg=ds_cfg)
@@ -169,7 +191,8 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         dsrc = self._get_ds(mockdata=MOCK_RETURNS)
         ret = dsrc.get_data()
         self.assertTrue(ret)
-        self.assertEquals(DMI_DATA_RETURN[0], dsrc.metadata['instance-id'])
+        self.assertEquals(MOCK_RETURNS['sdc:uuid'],
+                          dsrc.metadata['instance-id'])
 
     def test_root_keys(self):
         dsrc = self._get_ds(mockdata=MOCK_RETURNS)
@@ -407,18 +430,6 @@ class TestSmartOSDataSource(helpers.FilesystemMockingTestCase):
         self.assertEqual(dsrc.device_name_to_device('FOO'),
                          mydscfg['disk_aliases']['FOO'])
 
-    @mock.patch('cloudinit.sources.DataSourceSmartOS.JoyentMetadataClient')
-    @mock.patch('cloudinit.sources.DataSourceSmartOS.get_serial')
-    def test_serial_console_closed_on_error(self, get_serial, metadata_client):
-        class OurException(Exception):
-            pass
-        metadata_client.side_effect = OurException
-        try:
-            DataSourceSmartOS.query_data('noun', 'device', 0)
-        except OurException:
-            pass
-        self.assertEqual(1, get_serial.return_value.close.call_count)
-
 
 def apply_patches(patches):
     ret = []
@@ -447,14 +458,25 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
         }
 
         def make_response():
-            payload = ''
-            if self.response_parts['payload']:
-                payload = ' {0}'.format(self.response_parts['payload'])
-            del self.response_parts['payload']
-            return (
-                'V2 {length} {crc} {request_id} {command}{payload}\n'.format(
-                    payload=payload, **self.response_parts).encode('ascii'))
-        self.serial.readline.side_effect = make_response
+            payloadstr = ''
+            if 'payload' in self.response_parts:
+                payloadstr = ' {0}'.format(self.response_parts['payload'])
+            return ('V2 {length} {crc} {request_id} '
+                    '{command}{payloadstr}\n'.format(
+                    payloadstr=payloadstr,
+                    **self.response_parts).encode('ascii'))
+
+        self.metasource_data = None
+
+        def read_response(length):
+            if not self.metasource_data:
+                self.metasource_data = make_response()
+                self.metasource_data_len = len(self.metasource_data)
+            resp = self.metasource_data[:length]
+            self.metasource_data = self.metasource_data[length:]
+            return resp
+
+        self.serial.read.side_effect = read_response
         self.patched_funcs.enter_context(
             mock.patch('cloudinit.sources.DataSourceSmartOS.random.randint',
                        mock.Mock(return_value=self.request_id)))
@@ -477,7 +499,9 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
         client.get_metadata('some_key')
         self.assertEqual(1, self.serial.write.call_count)
         written_line = self.serial.write.call_args[0][0]
-        self.assertEndsWith(written_line, b'\n')
+        print(type(written_line))
+        self.assertEndsWith(written_line.decode('ascii'),
+            b'\n'.decode('ascii'))
         self.assertEqual(1, written_line.count(b'\n'))
 
     def _get_written_line(self, key='some_key'):
@@ -489,7 +513,8 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
         self.assertIsInstance(self._get_written_line(), six.binary_type)
 
     def test_get_metadata_line_starts_with_v2(self):
-        self.assertStartsWith(self._get_written_line(), b'V2')
+        foo = self._get_written_line()
+        self.assertStartsWith(foo.decode('ascii'), b'V2'.decode('ascii'))
 
     def test_get_metadata_uses_get_command(self):
         parts = self._get_written_line().decode('ascii').strip().split(' ')
@@ -526,7 +551,7 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
     def test_get_metadata_reads_a_line(self):
         client = self._get_client()
         client.get_metadata('some_key')
-        self.assertEqual(1, self.serial.readline.call_count)
+        self.assertEqual(self.metasource_data_len, self.serial.read.call_count)
 
     def test_get_metadata_returns_valid_value(self):
         client = self._get_client()
-- 
cgit v1.2.3


From a2e251c46307fed0b91e34084c361816829f251d Mon Sep 17 00:00:00 2001
From: Wesley Wiedenmeier <wesley.wiedenmeier@gmail.com>
Date: Thu, 4 Feb 2016 19:09:05 -0600
Subject:  - Ensure that lxd is installed before running lxd init.  - Handle
 init cfg separately from main cfg to allow multiple sections under lxd   
 config to be handled independently.  - Check for properly formatted lxd init
 cfg

---
 cloudinit/config/cc_lxd.py | 36 ++++++++++++++++++++++++++----------
 1 file changed, 26 insertions(+), 10 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 0db8356b..c9cf8704 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -36,15 +36,31 @@ from cloudinit import util
 
 
 def handle(name, cfg, cloud, log, args):
-    if not cfg.get('lxd') and cfg['lxd'].get('init'):
+    # Get config
+    lxd_cfg = cfg.get('lxd')
+    if not lxd_cfg and isinstance(lxd_cfg, dict):
         log.debug("Skipping module named %s, not present or disabled by cfg")
         return
-    lxd_conf = cfg['lxd']['init']
-    keys = ('network_address', 'network_port', 'storage_backend',
-            'storage_create_device', 'storage_create_loop', 'storage_pool',
-            'trust_password')
-    cmd = ['lxd', 'init', '--auto']
-    for k in keys:
-        if lxd_conf.get(k):
-            cmd.extend(["--%s" % k.replace('_', '-'), lxd_conf[k]])
-    util.subp(cmd)
+
+    # Ensure lxd is installed
+    if not util.which("lxd"):
+        try:
+            cloud.distro.install_packages(("lxd",))
+        except util.ProcessExecutionError as e:
+            log.warn("no lxd executable and could not install lxd: '%s'" % e)
+            return
+
+    # Set up lxd if init config is given
+    init_cfg = lxd_cfg.get('init')
+    if init_cfg:
+        if not isinstance(init_cfg, dict):
+            log.warn("lxd init config must be a dict of flag: val pairs")
+            return
+        init_keys = ('network_address', 'network_port', 'storage_backend',
+                     'storage_create_device', 'storage_create_loop',
+                     'storage_pool', 'trust_password')
+        cmd = ['lxd', 'init', '--auto']
+        for k in init_keys:
+            if init_cfg.get(k):
+                cmd.extend(["--%s" % k.replace('_', '-'), init_cfg[k]])
+        util.subp(cmd)
-- 
cgit v1.2.3


From 39f668e5db8d09c46eee3a5df73a69f8d85ba489 Mon Sep 17 00:00:00 2001
From: Sankar Tanguturi <stanguturi@stanguturi-rhel>
Date: Tue, 9 Feb 2016 17:54:07 -0800
Subject:   - Added the code to configure the NICs.   - Added the code to
 detect VMware Virtual Platform and apply the customization based on the
 'Customization Specification File' pushed into the guest VM.

---
 cloudinit/sources/DataSourceOVF.py                 | 107 ++++++++-
 cloudinit/sources/helpers/vmware/imc/config_nic.py | 246 +++++++++++++++++++++
 cloudinit/sources/helpers/vmware/imc/nic.py        |  28 ++-
 3 files changed, 372 insertions(+), 9 deletions(-)
 create mode 100644 cloudinit/sources/helpers/vmware/imc/config_nic.py

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 58a4b2a2..add7d243 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -24,11 +24,16 @@ from xml.dom import minidom
 
 import base64
 import os
+import shutil
 import re
+import time
 
 from cloudinit import log as logging
 from cloudinit import sources
 from cloudinit import util
+from cloudinit.sources.helpers.vmware.imc.config import Config
+from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile
+from cloudinit.sources.helpers.vmware.imc.config_nic import NicConfigurator
 
 LOG = logging.getLogger(__name__)
 
@@ -50,13 +55,51 @@ class DataSourceOVF(sources.DataSource):
         found = []
         md = {}
         ud = ""
+        vmwarePlatformFound = False
+        vmwareImcConfigFilePath = ''
 
         defaults = {
             "instance-id": "iid-dsovf",
         }
 
         (seedfile, contents) = get_ovf_env(self.paths.seed_dir)
-        if seedfile:
+        dmi_info = dmi_data()
+        system_uuid = ""
+        system_type = ""
+
+        if dmi_info is False:
+           LOG.debug("No dmidata utility found")
+        else:
+           system_uuid, system_type = tuple(dmi_info)
+
+        if 'vmware' in system_type.lower():
+            LOG.debug("VMware Virtual Platform found")
+            deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so")
+            if deployPkgPluginPath:
+                vmwareImcConfigFilePath = util.log_time(logfunc=LOG.debug,
+                                  msg="waiting for configuration file",
+                                  func=wait_for_imc_cfg_file,
+                                  args=("/tmp", "cust.cfg"))
+
+            if vmwareImcConfigFilePath:
+                LOG.debug("Found VMware DeployPkg Config File Path at %s" % vmwareImcConfigFilePath)
+            else:
+                LOG.debug("Didn't find VMware DeployPkg Config File Path")
+
+        if vmwareImcConfigFilePath:
+            try:
+                cf = ConfigFile(vmwareImcConfigFilePath)
+                conf = Config(cf)
+                (md, ud, cfg) = read_vmware_imc(conf)
+                nicConfigurator = NicConfigurator(conf.nics)
+                nicConfigurator.configure()
+                vmwarePlatformFound = True
+            except Exception as inst:
+                LOG.debug("Error while parsing the Customization Config File")
+            finally:
+                dirPath = os.path.dirname(vmwareImcConfigFilePath)
+                shutil.rmtree(dirPath)
+        elif seedfile:
             # Found a seed dir
             seed = os.path.join(self.paths.seed_dir, seedfile)
             (md, ud, cfg) = read_ovf_environment(contents)
@@ -76,7 +119,7 @@ class DataSourceOVF(sources.DataSource):
                 found.append(name)
 
         # There was no OVF transports found
-        if len(found) == 0:
+        if len(found) == 0 and not vmwarePlatformFound:
             return False
 
         if 'seedfrom' in md and md['seedfrom']:
@@ -108,7 +151,7 @@ class DataSourceOVF(sources.DataSource):
 
     def get_public_ssh_keys(self):
         if 'public-keys' not in self.metadata:
-            return []
+           return []
         pks = self.metadata['public-keys']
         if isinstance(pks, (list)):
             return pks
@@ -129,6 +172,31 @@ class DataSourceOVFNet(DataSourceOVF):
         self.supported_seed_starts = ("http://", "https://", "ftp://")
 
 
+def wait_for_imc_cfg_file(directoryPath, filename, maxwait=180, naplen=5):
+    waited = 0
+    
+    while waited < maxwait:
+        fileFullPath = search_file(directoryPath, filename)
+        if fileFullPath:
+            return fileFullPath
+        time.sleep(naplen)
+        waited += naplen
+    return None
+
+# This will return a dict with some content
+#  meta-data, user-data, some config
+def read_vmware_imc(config):
+    md = {}
+    cfg = {}
+    ud = ""
+    if config.host_name:
+       if config.domain_name:
+          md['local-hostname'] = config.host_name + "." + config.domain_name
+       else:
+          md['local-hostname'] = config.host_name
+
+    return (md, ud, cfg)
+
 # This will return a dict with some content
 #  meta-data, user-data, some config
 def read_ovf_environment(contents):
@@ -280,6 +348,39 @@ def get_properties(contents):
 
     return props
 
+def dmi_data():
+    sys_uuid = util.read_dmi_data("system-uuid")
+    sys_type = util.read_dmi_data("system-product-name")
+
+    if not sys_uuid or not sys_type:
+        return None
+
+    return (sys_uuid.lower(), sys_type)
+
+def search_file(directoryPath, filename):
+    if not directoryPath or not filename:
+       return None
+
+    dirs = []
+
+    if os.path.isdir(directoryPath):
+       dirs.append(directoryPath)
+
+    while dirs:
+        dir = dirs.pop()
+        children = []
+        try:
+            children.extend(os.listdir(dir))
+        except:
+            LOG.debug("Ignoring the error while searching the directory %s" % dir)
+        for child in children: 
+            childFullPath = os.path.join(dir, child)
+            if os.path.isdir(childFullPath):
+                dirs.append(childFullPath)
+            elif child == filename: 
+                return childFullPath
+
+    return None
 
 class XmlError(Exception):
     pass
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
new file mode 100644
index 00000000..8e2fc5d3
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -0,0 +1,246 @@
+# vi: ts=4 expandtab
+#
+#    Copyright (C) 2015 Canonical Ltd.
+#    Copyright (C) 2016 VMware INC.
+#
+#    Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+#    This program is free software: you can redistribute it and/or modify
+#    it under the terms of the GNU General Public License version 3, as
+#    published by the Free Software Foundation.
+#
+#    This program is distributed in the hope that it will be useful,
+#    but WITHOUT ANY WARRANTY; without even the implied warranty of
+#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+#    GNU General Public License for more details.
+#
+#    You should have received a copy of the GNU General Public License
+#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+import os
+import subprocess
+import re
+
+logger = logging.getLogger(__name__)
+
+
+class NicConfigurator:
+   def __init__(self, nics):
+      """
+      Initialize the Nic Configurator
+      @param nics (list) an array of nics to configure
+      """
+      self.nics = nics
+      self.mac2Name = {}
+      self.ipv4PrimaryGateway = None
+      self.ipv6PrimaryGateway = None
+      self.find_devices()
+      self._primaryNic = self.get_primary_nic()
+
+   def get_primary_nic(self):
+      """
+      Retrieve the primary nic if it exists
+      @return (NicBase): the primary nic if exists, None otherwise
+      """
+      primaryNic = None
+
+      for nic in self.nics:
+         if nic.primary:
+            if primaryNic:
+               raise Exception('There can only be one primary nic',
+                               primaryNic.mac, nic.mac)
+            primaryNic = nic
+
+      return primaryNic
+
+   def find_devices(self):
+      """
+      Create the mac2Name dictionary
+      The mac address(es) are in the lower case
+      """
+      cmd = 'ip addr show'
+      outText = subprocess.check_output(cmd, shell=True).decode()
+      sections = re.split(r'\n\d+: ', '\n' + outText)[1:]
+
+      macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
+      for section in sections:
+         matcher = re.search(macPat, section)
+         if not matcher:  # Only keep info about nics
+            continue
+         mac = matcher.group(1).lower()
+         name = section.split(':', 1)[0]
+         self.mac2Name[mac] = name
+
+   def gen_one_nic(self, nic):
+      """
+      Return the lines needed to configure a nic
+      @return (str list): the string list to configure the nic
+      @param nic (NicBase): the nic to configure
+      """
+      lines = []
+      name = self.mac2Name.get(nic.mac.lower())
+      if not name:
+         raise ValueError('No known device has MACADDR: %s' % nic.mac)
+
+      if nic.onboot:
+         lines.append('auto %s' % name)
+
+      # Customize IPv4
+      lines.extend(self.gen_ipv4(name, nic))
+
+      # Customize IPv6
+      lines.extend(self.gen_ipv6(name, nic))
+
+      lines.append('')
+
+      return lines
+
+   def gen_ipv4(self, name, nic):
+      """
+      Return the lines needed to configure the IPv4 setting of a nic
+      @return (str list): the string list to configure the gateways
+      @param name (str): name of the nic
+      @param nic (NicBase): the nic to configure
+      """
+      lines = []
+
+      bootproto = nic.bootProto.lower()
+      if nic.ipv4_mode.lower() == 'disabled':
+         bootproto = 'manual'
+      lines.append('iface %s inet %s' % (name, bootproto))
+
+      if bootproto != 'static':
+         return lines
+
+      # Static Ipv4
+      v4 = nic.staticIpv4
+      if v4.ip:
+         lines.append('    address %s' % v4.ip)
+      if v4.netmask:
+         lines.append('    netmask %s' % v4.netmask)
+
+      # Add the primary gateway
+      if nic.primary and v4.gateways:
+         self.ipv4PrimaryGateway = v4.gateways[0]
+         lines.append('    gateway %s metric 0' % self.ipv4PrimaryGateway)
+         return lines
+
+      # Add routes if there is no primary nic
+      if not self._primaryNic:
+         lines.extend(self.gen_ipv4_route(nic, v4.gateways))
+
+      return lines
+
+   def gen_ipv4_route(self, nic, gateways):
+      """
+      Return the lines needed to configure additional Ipv4 route
+      @return (str list): the string list to configure the gateways
+      @param nic (NicBase): the nic to configure
+      @param gateways (str list): the list of gateways
+      """
+      lines = []
+
+      for gateway in gateways:
+         lines.append('    up route add default gw %s metric 10000' % gateway)
+
+      return lines
+
+   def gen_ipv6(self, name, nic):
+      """
+      Return the lines needed to configure the gateways for a nic
+      @return (str list): the string list to configure the gateways
+      @param name (str): name of the nic
+      @param nic (NicBase): the nic to configure
+      """
+      lines = []
+
+      if not nic.staticIpv6:
+         return lines
+
+      # Static Ipv6
+      addrs = nic.staticIpv6
+      lines.append('iface %s inet6 static' % name)
+      lines.append('    address %s' % addrs[0].ip)
+      lines.append('    netmask %s' % addrs[0].netmask)
+
+      for addr in addrs[1:]:
+         lines.append('    up ifconfig %s inet6 add %s/%s' % (name, addr.ip,
+                                                              addr.netmask))
+      # Add the primary gateway
+      if nic.primary:
+         for addr in addrs:
+            if addr.gateway:
+               self.ipv6PrimaryGateway = addr.gateway
+               lines.append('    gateway %s' % self.ipv6PrimaryGateway)
+               return lines
+
+      # Add routes if there is no primary nic
+      if not self._primaryNic:
+         lines.extend(self._genIpv6Route(name, nic, addrs))
+
+      return lines
+
+   def _genIpv6Route(self, name, nic, addrs):
+      lines = []
+
+      for addr in addrs:
+         lines.append('    up route -A inet6 add default gw %s metric 10000' %
+                      addr.gateway)
+
+      return lines
+
+   def generate(self):
+      """Return the lines that is needed to configure the nics"""
+      lines = []
+      lines.append('iface lo inet loopback')
+      lines.append('auto lo')
+      lines.append('')
+
+      for nic in self.nics:
+         lines.extend(self.gen_one_nic(nic))
+
+      return lines
+
+   def clear_dhcp(self):
+      logger.info('Clearing DHCP leases')
+
+      subprocess.call('pkill dhclient', shell=True)
+      subprocess.check_call('rm -f /var/lib/dhcp/*', shell=True)
+
+   def if_down_up(self):
+      names = []
+      for nic in self.nics:
+         name = self.mac2Name.get(nic.mac.lower())
+         names.append(name)
+
+      for name in names:
+         logger.info('Bring down interface %s' % name)
+         subprocess.check_call('ifdown %s' % name, shell=True)
+
+      self.clear_dhcp()
+
+      for name in names:
+         logger.info('Bring up interface %s' % name)
+         subprocess.check_call('ifup %s' % name, shell=True)
+
+   def configure(self):
+      """
+      Configure the /etc/network/intefaces
+      Make a back up of the original
+      """
+      containingDir = '/etc/network'
+
+      interfaceFile = os.path.join(containingDir, 'interfaces')
+      originalFile = os.path.join(containingDir,
+                                  'interfaces.before_vmware_customization')
+
+      if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
+         os.rename(interfaceFile, originalFile)
+
+      lines = self.generate()
+      with open(interfaceFile, 'w') as fp:
+         for line in lines:
+            fp.write('%s\n' % line)
+
+      self.if_down_up()
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
index a7594874..6628a3ec 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -47,21 +47,37 @@ class Nic(NicBase):
 
     @property
     def primary(self):
-        value = self._get('PRIMARY').lower()
-        return value == 'yes' or value == 'true'
+        value = self._get('PRIMARY')
+        if value:
+           value = value.lower()
+           return value == 'yes' or value == 'true'
+        else:
+           return False
 
     @property
     def onboot(self):
-        value = self._get('ONBOOT').lower()
-        return value == 'yes' or value == 'true'
+        value = self._get('ONBOOT')
+        if value:
+           value = value.lower()
+           return value == 'yes' or value == 'true'
+        else:
+           return False
 
     @property
     def bootProto(self):
-        return self._get('BOOTPROTO').lower()
+        value = self._get('BOOTPROTO')
+        if value:
+           return value.lower()
+        else:
+           return ""
 
     @property
     def ipv4_mode(self):
-        return self._get('IPv4_MODE').lower()
+        value = self._get('IPv4_MODE')
+        if value:
+           return value.lower()
+        else:
+           return ""
 
     @property
     def staticIpv4(self):
-- 
cgit v1.2.3


From 0ce71cb8975e19677eea415101e15da5f4095cd5 Mon Sep 17 00:00:00 2001
From: Sankar Tanguturi <stanguturi@stanguturi-rhel>
Date: Tue, 16 Feb 2016 17:34:24 -0800
Subject:  - Used proper 4 space indentations for config_nic.py and nic.py  -
 Implemented the 'search_file' function using 'os.walk()'  - Fixed a few
 variable names.  - Removed size() function in config_file.py  - Updated the
 test_config_file.py to use len() instead of .size()

---
 cloudinit/sources/DataSourceOVF.py                 |  34 +-
 .../sources/helpers/vmware/imc/config_file.py      |   4 -
 cloudinit/sources/helpers/vmware/imc/config_nic.py | 433 +++++++++++----------
 cloudinit/sources/helpers/vmware/imc/nic.py        |  20 +-
 tests/unittests/test_vmware_config_file.py         |   4 +-
 5 files changed, 238 insertions(+), 257 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index add7d243..6d3bf7bb 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -64,13 +64,12 @@ class DataSourceOVF(sources.DataSource):
 
         (seedfile, contents) = get_ovf_env(self.paths.seed_dir)
         dmi_info = dmi_data()
-        system_uuid = ""
         system_type = ""
 
-        if dmi_info is False:
+        if dmi_info is None:
            LOG.debug("No dmidata utility found")
         else:
-           system_uuid, system_type = tuple(dmi_info)
+           (_, system_type) = dmi_info
 
         if 'vmware' in system_type.lower():
             LOG.debug("VMware Virtual Platform found")
@@ -172,11 +171,11 @@ class DataSourceOVFNet(DataSourceOVF):
         self.supported_seed_starts = ("http://", "https://", "ftp://")
 
 
-def wait_for_imc_cfg_file(directoryPath, filename, maxwait=180, naplen=5):
+def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
     waited = 0
     
     while waited < maxwait:
-        fileFullPath = search_file(directoryPath, filename)
+        fileFullPath = search_file(dirpath, filename)
         if fileFullPath:
             return fileFullPath
         time.sleep(naplen)
@@ -357,28 +356,13 @@ def dmi_data():
 
     return (sys_uuid.lower(), sys_type)
 
-def search_file(directoryPath, filename):
-    if not directoryPath or not filename:
+def search_file(dirpath, filename):
+    if not dirpath or not filename:
        return None
 
-    dirs = []
-
-    if os.path.isdir(directoryPath):
-       dirs.append(directoryPath)
-
-    while dirs:
-        dir = dirs.pop()
-        children = []
-        try:
-            children.extend(os.listdir(dir))
-        except:
-            LOG.debug("Ignoring the error while searching the directory %s" % dir)
-        for child in children: 
-            childFullPath = os.path.join(dir, child)
-            if os.path.isdir(childFullPath):
-                dirs.append(childFullPath)
-            elif child == filename: 
-                return childFullPath
+    for root, dirs, files in os.walk(dirpath):
+        if filename in files:
+            return os.path.join(root, filename)
 
     return None
 
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
index 7c47d14c..bb9fb7dc 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_file.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -61,10 +61,6 @@ class ConfigFile(ConfigSource, dict):
 
         self[key] = val
 
-    def size(self):
-        """Return the number of properties present."""
-        return len(self)
-
     def _loadConfigFile(self, filename):
         """
         Parses properties from the specified config file.
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 8e2fc5d3..d79e6936 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -26,221 +26,222 @@ logger = logging.getLogger(__name__)
 
 
 class NicConfigurator:
-   def __init__(self, nics):
-      """
-      Initialize the Nic Configurator
-      @param nics (list) an array of nics to configure
-      """
-      self.nics = nics
-      self.mac2Name = {}
-      self.ipv4PrimaryGateway = None
-      self.ipv6PrimaryGateway = None
-      self.find_devices()
-      self._primaryNic = self.get_primary_nic()
-
-   def get_primary_nic(self):
-      """
-      Retrieve the primary nic if it exists
-      @return (NicBase): the primary nic if exists, None otherwise
-      """
-      primaryNic = None
-
-      for nic in self.nics:
-         if nic.primary:
-            if primaryNic:
-               raise Exception('There can only be one primary nic',
-                               primaryNic.mac, nic.mac)
+    def __init__(self, nics):
+        """
+        Initialize the Nic Configurator
+        @param nics (list) an array of nics to configure
+        """
+        self.nics = nics
+        self.mac2Name = {}
+        self.ipv4PrimaryGateway = None
+        self.ipv6PrimaryGateway = None
+        self.find_devices()
+        self._primaryNic = self.get_primary_nic()
+
+    def get_primary_nic(self):
+        """
+        Retrieve the primary nic if it exists
+        @return (NicBase): the primary nic if exists, None otherwise
+        """
+        primaryNic = None
+
+        for nic in self.nics:
+            if nic.primary:
+                if primaryNic:
+                    raise Exception('There can only be one primary nic',
+                                    primaryNic.mac, nic.mac)
             primaryNic = nic
 
-      return primaryNic
-
-   def find_devices(self):
-      """
-      Create the mac2Name dictionary
-      The mac address(es) are in the lower case
-      """
-      cmd = 'ip addr show'
-      outText = subprocess.check_output(cmd, shell=True).decode()
-      sections = re.split(r'\n\d+: ', '\n' + outText)[1:]
-
-      macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
-      for section in sections:
-         matcher = re.search(macPat, section)
-         if not matcher:  # Only keep info about nics
-            continue
-         mac = matcher.group(1).lower()
-         name = section.split(':', 1)[0]
-         self.mac2Name[mac] = name
-
-   def gen_one_nic(self, nic):
-      """
-      Return the lines needed to configure a nic
-      @return (str list): the string list to configure the nic
-      @param nic (NicBase): the nic to configure
-      """
-      lines = []
-      name = self.mac2Name.get(nic.mac.lower())
-      if not name:
-         raise ValueError('No known device has MACADDR: %s' % nic.mac)
-
-      if nic.onboot:
-         lines.append('auto %s' % name)
-
-      # Customize IPv4
-      lines.extend(self.gen_ipv4(name, nic))
-
-      # Customize IPv6
-      lines.extend(self.gen_ipv6(name, nic))
-
-      lines.append('')
-
-      return lines
-
-   def gen_ipv4(self, name, nic):
-      """
-      Return the lines needed to configure the IPv4 setting of a nic
-      @return (str list): the string list to configure the gateways
-      @param name (str): name of the nic
-      @param nic (NicBase): the nic to configure
-      """
-      lines = []
-
-      bootproto = nic.bootProto.lower()
-      if nic.ipv4_mode.lower() == 'disabled':
-         bootproto = 'manual'
-      lines.append('iface %s inet %s' % (name, bootproto))
-
-      if bootproto != 'static':
-         return lines
-
-      # Static Ipv4
-      v4 = nic.staticIpv4
-      if v4.ip:
-         lines.append('    address %s' % v4.ip)
-      if v4.netmask:
-         lines.append('    netmask %s' % v4.netmask)
-
-      # Add the primary gateway
-      if nic.primary and v4.gateways:
-         self.ipv4PrimaryGateway = v4.gateways[0]
-         lines.append('    gateway %s metric 0' % self.ipv4PrimaryGateway)
-         return lines
-
-      # Add routes if there is no primary nic
-      if not self._primaryNic:
-         lines.extend(self.gen_ipv4_route(nic, v4.gateways))
-
-      return lines
-
-   def gen_ipv4_route(self, nic, gateways):
-      """
-      Return the lines needed to configure additional Ipv4 route
-      @return (str list): the string list to configure the gateways
-      @param nic (NicBase): the nic to configure
-      @param gateways (str list): the list of gateways
-      """
-      lines = []
-
-      for gateway in gateways:
-         lines.append('    up route add default gw %s metric 10000' % gateway)
-
-      return lines
-
-   def gen_ipv6(self, name, nic):
-      """
-      Return the lines needed to configure the gateways for a nic
-      @return (str list): the string list to configure the gateways
-      @param name (str): name of the nic
-      @param nic (NicBase): the nic to configure
-      """
-      lines = []
-
-      if not nic.staticIpv6:
-         return lines
-
-      # Static Ipv6
-      addrs = nic.staticIpv6
-      lines.append('iface %s inet6 static' % name)
-      lines.append('    address %s' % addrs[0].ip)
-      lines.append('    netmask %s' % addrs[0].netmask)
-
-      for addr in addrs[1:]:
-         lines.append('    up ifconfig %s inet6 add %s/%s' % (name, addr.ip,
-                                                              addr.netmask))
-      # Add the primary gateway
-      if nic.primary:
-         for addr in addrs:
-            if addr.gateway:
-               self.ipv6PrimaryGateway = addr.gateway
-               lines.append('    gateway %s' % self.ipv6PrimaryGateway)
-               return lines
-
-      # Add routes if there is no primary nic
-      if not self._primaryNic:
-         lines.extend(self._genIpv6Route(name, nic, addrs))
-
-      return lines
-
-   def _genIpv6Route(self, name, nic, addrs):
-      lines = []
-
-      for addr in addrs:
-         lines.append('    up route -A inet6 add default gw %s metric 10000' %
-                      addr.gateway)
-
-      return lines
-
-   def generate(self):
-      """Return the lines that is needed to configure the nics"""
-      lines = []
-      lines.append('iface lo inet loopback')
-      lines.append('auto lo')
-      lines.append('')
-
-      for nic in self.nics:
-         lines.extend(self.gen_one_nic(nic))
-
-      return lines
-
-   def clear_dhcp(self):
-      logger.info('Clearing DHCP leases')
-
-      subprocess.call('pkill dhclient', shell=True)
-      subprocess.check_call('rm -f /var/lib/dhcp/*', shell=True)
-
-   def if_down_up(self):
-      names = []
-      for nic in self.nics:
-         name = self.mac2Name.get(nic.mac.lower())
-         names.append(name)
-
-      for name in names:
-         logger.info('Bring down interface %s' % name)
-         subprocess.check_call('ifdown %s' % name, shell=True)
-
-      self.clear_dhcp()
-
-      for name in names:
-         logger.info('Bring up interface %s' % name)
-         subprocess.check_call('ifup %s' % name, shell=True)
-
-   def configure(self):
-      """
-      Configure the /etc/network/intefaces
-      Make a back up of the original
-      """
-      containingDir = '/etc/network'
-
-      interfaceFile = os.path.join(containingDir, 'interfaces')
-      originalFile = os.path.join(containingDir,
-                                  'interfaces.before_vmware_customization')
-
-      if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
-         os.rename(interfaceFile, originalFile)
-
-      lines = self.generate()
-      with open(interfaceFile, 'w') as fp:
-         for line in lines:
-            fp.write('%s\n' % line)
-
-      self.if_down_up()
+        return primaryNic
+
+    def find_devices(self):
+        """
+        Create the mac2Name dictionary
+        The mac address(es) are in the lower case
+        """
+        cmd = 'ip addr show'
+        outText = subprocess.check_output(cmd, shell=True).decode()
+        sections = re.split(r'\n\d+: ', '\n' + outText)[1:]
+
+        macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
+        for section in sections:
+            matcher = re.search(macPat, section)
+            if not matcher:  # Only keep info about nics
+                continue
+            mac = matcher.group(1).lower()
+            name = section.split(':', 1)[0]
+            self.mac2Name[mac] = name
+
+    def gen_one_nic(self, nic):
+        """
+        Return the lines needed to configure a nic
+        @return (str list): the string list to configure the nic
+        @param nic (NicBase): the nic to configure
+        """
+        lines = []
+        name = self.mac2Name.get(nic.mac.lower())
+        if not name:
+            raise ValueError('No known device has MACADDR: %s' % nic.mac)
+
+        if nic.onboot:
+            lines.append('auto %s' % name)
+
+        # Customize IPv4
+        lines.extend(self.gen_ipv4(name, nic))
+
+        # Customize IPv6
+        lines.extend(self.gen_ipv6(name, nic))
+
+        lines.append('')
+
+        return lines
+
+    def gen_ipv4(self, name, nic):
+        """
+        Return the lines needed to configure the IPv4 setting of a nic
+        @return (str list): the string list to configure the gateways
+        @param name (str): name of the nic
+        @param nic (NicBase): the nic to configure
+        """
+        lines = []
+
+        bootproto = nic.bootProto.lower()
+        if nic.ipv4_mode.lower() == 'disabled':
+            bootproto = 'manual'
+        lines.append('iface %s inet %s' % (name, bootproto))
+
+        if bootproto != 'static':
+            return lines
+
+        # Static Ipv4
+        v4 = nic.staticIpv4
+        if v4.ip:
+            lines.append('    address %s' % v4.ip)
+        if v4.netmask:
+            lines.append('    netmask %s' % v4.netmask)
+
+        # Add the primary gateway
+        if nic.primary and v4.gateways:
+            self.ipv4PrimaryGateway = v4.gateways[0]
+            lines.append('    gateway %s metric 0' % self.ipv4PrimaryGateway)
+            return lines
+
+        # Add routes if there is no primary nic
+        if not self._primaryNic:
+            lines.extend(self.gen_ipv4_route(nic, v4.gateways))
+
+        return lines
+
+    def gen_ipv4_route(self, nic, gateways):
+        """
+        Return the lines needed to configure additional Ipv4 route
+        @return (str list): the string list to configure the gateways
+        @param nic (NicBase): the nic to configure
+        @param gateways (str list): the list of gateways
+        """
+        lines = []
+
+        for gateway in gateways:
+            lines.append('    up route add default gw %s metric 10000' %
+                         gateway)
+
+        return lines
+
+    def gen_ipv6(self, name, nic):
+        """
+        Return the lines needed to configure the gateways for a nic
+        @return (str list): the string list to configure the gateways
+        @param name (str): name of the nic
+        @param nic (NicBase): the nic to configure
+        """
+        lines = []
+
+        if not nic.staticIpv6:
+            return lines
+
+        # Static Ipv6
+        addrs = nic.staticIpv6
+        lines.append('iface %s inet6 static' % name)
+        lines.append('    address %s' % addrs[0].ip)
+        lines.append('    netmask %s' % addrs[0].netmask)
+
+        for addr in addrs[1:]:
+            lines.append('    up ifconfig %s inet6 add %s/%s' % (name, addr.ip,
+                                                                 addr.netmask))
+        # Add the primary gateway
+        if nic.primary:
+            for addr in addrs:
+                if addr.gateway:
+                    self.ipv6PrimaryGateway = addr.gateway
+                    lines.append('    gateway %s' % self.ipv6PrimaryGateway)
+                    return lines
+
+        # Add routes if there is no primary nic
+        if not self._primaryNic:
+            lines.extend(self._genIpv6Route(name, nic, addrs))
+
+        return lines
+
+    def _genIpv6Route(self, name, nic, addrs):
+        lines = []
+
+        for addr in addrs:
+            lines.append('    up route -A inet6 add default gw %s metric 10000' %
+                         addr.gateway)
+
+        return lines
+
+    def generate(self):
+        """Return the lines that is needed to configure the nics"""
+        lines = []
+        lines.append('iface lo inet loopback')
+        lines.append('auto lo')
+        lines.append('')
+
+        for nic in self.nics:
+            lines.extend(self.gen_one_nic(nic))
+
+        return lines
+
+    def clear_dhcp(self):
+        logger.info('Clearing DHCP leases')
+
+        subprocess.call('pkill dhclient', shell=True)
+        subprocess.check_call('rm -f /var/lib/dhcp/*', shell=True)
+
+    def if_down_up(self):
+        names = []
+        for nic in self.nics:
+            name = self.mac2Name.get(nic.mac.lower())
+            names.append(name)
+
+        for name in names:
+            logger.info('Bring down interface %s' % name)
+            subprocess.check_call('ifdown %s' % name, shell=True)
+
+        self.clear_dhcp()
+
+        for name in names:
+            logger.info('Bring up interface %s' % name)
+            subprocess.check_call('ifup %s' % name, shell=True)
+
+    def configure(self):
+        """
+        Configure the /etc/network/intefaces
+        Make a back up of the original
+        """
+        containingDir = '/etc/network'
+
+        interfaceFile = os.path.join(containingDir, 'interfaces')
+        originalFile = os.path.join(containingDir,
+                                    'interfaces.before_vmware_customization')
+
+        if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
+            os.rename(interfaceFile, originalFile)
+
+        lines = self.generate()
+        with open(interfaceFile, 'w') as fp:
+            for line in lines:
+                fp.write('%s\n' % line)
+
+        self.if_down_up()
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
index 6628a3ec..b5d704ea 100644
--- a/cloudinit/sources/helpers/vmware/imc/nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -49,35 +49,35 @@ class Nic(NicBase):
     def primary(self):
         value = self._get('PRIMARY')
         if value:
-           value = value.lower()
-           return value == 'yes' or value == 'true'
+            value = value.lower()
+            return value == 'yes' or value == 'true'
         else:
-           return False
+            return False
 
     @property
     def onboot(self):
         value = self._get('ONBOOT')
         if value:
-           value = value.lower()
-           return value == 'yes' or value == 'true'
+            value = value.lower()
+            return value == 'yes' or value == 'true'
         else:
-           return False
+            return False
 
     @property
     def bootProto(self):
         value = self._get('BOOTPROTO')
         if value:
-           return value.lower()
+            return value.lower()
         else:
-           return ""
+            return ""
 
     @property
     def ipv4_mode(self):
         value = self._get('IPv4_MODE')
         if value:
-           return value.lower()
+            return value.lower()
         else:
-           return ""
+            return ""
 
     @property
     def staticIpv4(self):
diff --git a/tests/unittests/test_vmware_config_file.py b/tests/unittests/test_vmware_config_file.py
index 51166dd7..d5c7367b 100644
--- a/tests/unittests/test_vmware_config_file.py
+++ b/tests/unittests/test_vmware_config_file.py
@@ -36,12 +36,12 @@ class TestVmwareConfigFile(unittest.TestCase):
 
         cf.clear()
 
-        self.assertEqual(0, cf.size(), "clear size")
+        self.assertEqual(0, len(cf), "clear size")
 
         cf._insertKey("  PASSWORD|-PASS ", "  foo  ")
         cf._insertKey("BAR", "   ")
 
-        self.assertEqual(2, cf.size(), "insert size")
+        self.assertEqual(2, len(cf), "insert size")
         self.assertEqual('foo', cf["PASSWORD|-PASS"], "password")
         self.assertTrue("PASSWORD|-PASS" in cf, "hasPassword")
         self.assertFalse(cf.should_keep_current_value("PASSWORD|-PASS"),
-- 
cgit v1.2.3


From c5d2f79a982258d86181368b25ce6bc6638ef645 Mon Sep 17 00:00:00 2001
From: Sankar Tanguturi <stanguturi@stanguturi-rhel>
Date: Thu, 18 Feb 2016 18:31:07 -0800
Subject:  - Removed dmi_data function.  - Fixed a few variable names.  - Used
 util.subp methods for process related manipulations.

---
 cloudinit/sources/DataSourceOVF.py                 | 20 +++--------
 cloudinit/sources/helpers/vmware/imc/config_nic.py | 40 +++++++++++-----------
 2 files changed, 24 insertions(+), 36 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 6d3bf7bb..72ba5aba 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -63,15 +63,11 @@ class DataSourceOVF(sources.DataSource):
         }
 
         (seedfile, contents) = get_ovf_env(self.paths.seed_dir)
-        dmi_info = dmi_data()
-        system_type = ""
 
-        if dmi_info is None:
-           LOG.debug("No dmidata utility found")
-        else:
-           (_, system_type) = dmi_info
-
-        if 'vmware' in system_type.lower():
+        system_type = util.read_dmi_data("system-product-name")
+        if system_type is None:
+           LOG.debug("No system-product-name found")
+        elif 'vmware' in system_type.lower():
             LOG.debug("VMware Virtual Platform found")
             deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so")
             if deployPkgPluginPath:
@@ -347,14 +343,6 @@ def get_properties(contents):
 
     return props
 
-def dmi_data():
-    sys_uuid = util.read_dmi_data("system-uuid")
-    sys_type = util.read_dmi_data("system-product-name")
-
-    if not sys_uuid or not sys_type:
-        return None
-
-    return (sys_uuid.lower(), sys_type)
 
 def search_file(dirpath, filename):
     if not dirpath or not filename:
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index d79e6936..172a1649 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -22,6 +22,8 @@ import os
 import subprocess
 import re
 
+from cloudinit import util
+
 logger = logging.getLogger(__name__)
 
 
@@ -43,32 +45,30 @@ class NicConfigurator:
         Retrieve the primary nic if it exists
         @return (NicBase): the primary nic if exists, None otherwise
         """
-        primaryNic = None
-
-        for nic in self.nics:
-            if nic.primary:
-                if primaryNic:
-                    raise Exception('There can only be one primary nic',
-                                    primaryNic.mac, nic.mac)
-            primaryNic = nic
-
-        return primaryNic
+        primary_nics = [nic for nic in self.nics if nic.primary]
+        if not primary_nics:
+           return None
+        elif len(primary_nics) > 1:
+           raise Exception('There can only be one primary nic',
+                            [nic.mac for nic in primary_nics])
+        else:
+           return primary_nics[0]
 
     def find_devices(self):
         """
         Create the mac2Name dictionary
         The mac address(es) are in the lower case
         """
-        cmd = 'ip addr show'
-        outText = subprocess.check_output(cmd, shell=True).decode()
-        sections = re.split(r'\n\d+: ', '\n' + outText)[1:]
+        cmd = ['ip', 'addr', 'show']
+        (output, err) = util.subp(cmd)
+        sections = re.split(r'\n\d+: ', '\n' + output)[1:]
 
         macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
         for section in sections:
-            matcher = re.search(macPat, section)
-            if not matcher:  # Only keep info about nics
+            match = re.search(macPat, section)
+            if not match:  # Only keep info about nics
                 continue
-            mac = matcher.group(1).lower()
+            mac = match.group(1).lower()
             name = section.split(':', 1)[0]
             self.mac2Name[mac] = name
 
@@ -206,8 +206,8 @@ class NicConfigurator:
     def clear_dhcp(self):
         logger.info('Clearing DHCP leases')
 
-        subprocess.call('pkill dhclient', shell=True)
-        subprocess.check_call('rm -f /var/lib/dhcp/*', shell=True)
+        util.subp(["pkill", "dhclient"])
+        util.subp(["rm", "-f", "/var/lib/dhcp/*"])
 
     def if_down_up(self):
         names = []
@@ -217,13 +217,13 @@ class NicConfigurator:
 
         for name in names:
             logger.info('Bring down interface %s' % name)
-            subprocess.check_call('ifdown %s' % name, shell=True)
+            util.subp(["ifdown", "%s" % name])
 
         self.clear_dhcp()
 
         for name in names:
             logger.info('Bring up interface %s' % name)
-            subprocess.check_call('ifup %s' % name, shell=True)
+            util.subp(["ifup", "%s" % name])
 
     def configure(self):
         """
-- 
cgit v1.2.3


From b20191f04c586147165a304b88a2b89c89f79225 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 25 Feb 2016 14:32:14 -0500
Subject: minor cleanups

---
 cloudinit/config/cc_lxd.py | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index c9cf8704..aaafb643 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,18 +47,20 @@ def handle(name, cfg, cloud, log, args):
         try:
             cloud.distro.install_packages(("lxd",))
         except util.ProcessExecutionError as e:
-            log.warn("no lxd executable and could not install lxd: '%s'" % e)
+            log.warn("no lxd executable and could not install lxd:", e)
             return
 
     # Set up lxd if init config is given
+    init_keys = (
+        'network_address', 'network_port', 'storage_backend',
+        'storage_create_device', 'storage_create_loop',
+        'storage_pool', 'trust_password')
     init_cfg = lxd_cfg.get('init')
     if init_cfg:
         if not isinstance(init_cfg, dict):
-            log.warn("lxd init config must be a dict of flag: val pairs")
+            log.warn("lxd/init config must be a dictionary. found a '%s'",
+                      type(f))
             return
-        init_keys = ('network_address', 'network_port', 'storage_backend',
-                     'storage_create_device', 'storage_create_loop',
-                     'storage_pool', 'trust_password')
         cmd = ['lxd', 'init', '--auto']
         for k in init_keys:
             if init_cfg.get(k):
-- 
cgit v1.2.3


From 14915526ca67bbf7842028d48170015b85f87469 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 1 Mar 2016 00:19:55 -0500
Subject: lxd: general fix after testing

A few changes:
 a.) change to using '--name=value' rather than '--name' 'value'
 b.) make sure only strings are passed to command
     (useful for storage_create_loop: which is likely an integer)
 c.) document simple working example
 d.) support installing zfs if not present and storage_backend has it.
---
 cloudinit/config/cc_lxd.py                       | 35 ++++++++++++++++++------
 doc/examples/cloud-config-lxd.txt                |  7 +++++
 tests/unittests/test_handler/test_handler_lxd.py |  9 +++---
 3 files changed, 38 insertions(+), 13 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index aaafb643..84eec7a5 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -38,16 +38,36 @@ from cloudinit import util
 def handle(name, cfg, cloud, log, args):
     # Get config
     lxd_cfg = cfg.get('lxd')
-    if not lxd_cfg and isinstance(lxd_cfg, dict):
+    if not lxd_cfg:
         log.debug("Skipping module named %s, not present or disabled by cfg")
         return
+    if not isinstance(lxd_cfg, dict):
+        log.warn("lxd config must be a dictionary. found a '%s'",
+                 type(lxd_cfg))
+        return
+
+    init_cfg = lxd_cfg.get('init')
+    if not init_cfg:
+        init_cfg = {}
+
+    if not isinstance(init_cfg, dict):
+        log.warn("lxd/init config must be a dictionary. found a '%s'",
+                  type(init_cfg))
+        init_cfg = {}
+
+    packages = []
+    if (init_cfg.get("storage_backend") == "zfs" and not util.which('zfs')):
+       packages.append('zfs')
 
     # Ensure lxd is installed
     if not util.which("lxd"):
+        packages.append('lxd')
+    
+    if len(packages):
         try:
-            cloud.distro.install_packages(("lxd",))
+            cloud.distro.install_packages(packages)
         except util.ProcessExecutionError as e:
-            log.warn("no lxd executable and could not install lxd:", e)
+            log.warn("failed to install packages %s: %s", packages, e)
             return
 
     # Set up lxd if init config is given
@@ -55,14 +75,11 @@ def handle(name, cfg, cloud, log, args):
         'network_address', 'network_port', 'storage_backend',
         'storage_create_device', 'storage_create_loop',
         'storage_pool', 'trust_password')
-    init_cfg = lxd_cfg.get('init')
+
     if init_cfg:
-        if not isinstance(init_cfg, dict):
-            log.warn("lxd/init config must be a dictionary. found a '%s'",
-                      type(f))
-            return
         cmd = ['lxd', 'init', '--auto']
         for k in init_keys:
             if init_cfg.get(k):
-                cmd.extend(["--%s" % k.replace('_', '-'), init_cfg[k]])
+                cmd.extend(["--%s=%s" %
+                            (k.replace('_', '-'), str(init_cfg[k]))])
         util.subp(cmd)
diff --git a/doc/examples/cloud-config-lxd.txt b/doc/examples/cloud-config-lxd.txt
index f66da4c3..b9bb4aa5 100644
--- a/doc/examples/cloud-config-lxd.txt
+++ b/doc/examples/cloud-config-lxd.txt
@@ -19,3 +19,10 @@ lxd:
     network_port: 8443
     storage_backend: zfs
     storage_pool: datapool
+    storage_create_loop: 10
+
+
+# The simplist working configuration is
+# lxd:
+#  init:
+#   storage_backend: dir
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
index 4d858b8f..65794a41 100644
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -43,9 +43,10 @@ class TestLxd(t_help.TestCase):
         self.assertTrue(mock_util.which.called)
         init_call = mock_util.subp.call_args_list[0][0][0]
         self.assertEquals(init_call,
-                          ['lxd', 'init', '--auto', '--network-address',
-                           '0.0.0.0', '--storage-backend', 'zfs',
-                           '--storage-pool', 'poolname'])
+                          ['lxd', 'init', '--auto',
+                           '--network-address=0.0.0.0',
+                           '--storage-backend=zfs',
+                           '--storage-pool=poolname'])
 
     @mock.patch("cloudinit.config.cc_lxd.util")
     def test_lxd_install(self, mock_util):
@@ -55,4 +56,4 @@ class TestLxd(t_help.TestCase):
         cc_lxd.handle('cc_lxd', self.lxd_cfg, cc, LOG, [])
         self.assertTrue(cc.distro.install_packages.called)
         install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
-        self.assertEquals(install_pkg, ('lxd',))
+        self.assertEquals(sorted(install_pkg), ['lxd', 'zfs'])
-- 
cgit v1.2.3


From 290afe72d53b5e38c3781934e23a676a3c1986e5 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Tue, 1 Mar 2016 12:30:31 -0500
Subject: timezone: use a symlink when updating /etc/localtime

Unless /etc/localtime is an existing file and not a symlink,
we will symlink instead of copying the tz_file to /etc/localtime.

The copy was due to an old bug in Ubuntu, symlink should be preferred.

LP: #1543025
---
 ChangeLog                     | 2 ++
 cloudinit/distros/__init__.py | 6 +++++-
 2 files changed, 7 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index b31148ac..2f1f9f87 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -81,6 +81,8 @@
  - lxd: add support for setting up lxd using 'lxd init' (LP: #1522879)
  - Add Image Customization Parser for VMware vSphere Hypervisor
    Support. [Sankar Tanguturi]
+ - timezone: use a symlink rather than copy for /etc/localtime
+   unless it is already a file (LP: #1543025).
 
 0.7.6:
  - open 0.7.6
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 71884b32..8167c594 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -897,5 +897,9 @@ def set_etc_timezone(tz, tz_file=None, tz_conf="/etc/timezone",
     util.write_file(tz_conf, str(tz).rstrip() + "\n")
     # This ensures that the correct tz will be used for the system
     if tz_local and tz_file:
-        util.copy(tz_file, tz_local)
+        # use a symlink if there exists a symlink or tz_local is not present
+        if os.path.islink(tz_local) or not os.path.exists(tz_local):
+            os.symlink(tz_file, tz_local)
+        else:
+            util.copy(tz_file, tz_local)
     return
-- 
cgit v1.2.3


From 51a27968ae9805c747cdc27d35a31c49df6d2217 Mon Sep 17 00:00:00 2001
From: Sankar Tanguturi <stanguturi@stanguturi-rhel>
Date: Tue, 1 Mar 2016 16:43:50 -0800
Subject:  Added a kill switch for customization on VMware platform.  The
 customization is set to False by default and is triggered only  when the
 option disable_vmware_customization is set to false in  /etc/cloud/cloud.cfg

---
 cloudinit/sources/DataSourceOVF.py | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 72ba5aba..d92c128c 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -68,18 +68,23 @@ class DataSourceOVF(sources.DataSource):
         if system_type is None:
            LOG.debug("No system-product-name found")
         elif 'vmware' in system_type.lower():
-            LOG.debug("VMware Virtual Platform found")
-            deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so")
-            if deployPkgPluginPath:
-                vmwareImcConfigFilePath = util.log_time(logfunc=LOG.debug,
-                                  msg="waiting for configuration file",
-                                  func=wait_for_imc_cfg_file,
-                                  args=("/tmp", "cust.cfg"))
-
-            if vmwareImcConfigFilePath:
-                LOG.debug("Found VMware DeployPkg Config File Path at %s" % vmwareImcConfigFilePath)
+            LOG.debug("VMware Virtualization Platform found")
+            if not util.get_cfg_option_bool(self.sys_cfg,
+                                        "disable_vmware_customization",
+                                        True):
+                deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so")
+                if deployPkgPluginPath:
+                    vmwareImcConfigFilePath = util.log_time(logfunc=LOG.debug,
+                                      msg="waiting for configuration file",
+                                      func=wait_for_imc_cfg_file,
+                                      args=("/tmp", "cust.cfg"))
+
+                if vmwareImcConfigFilePath:
+                    LOG.debug("Found VMware DeployPkg Config File Path at %s" % vmwareImcConfigFilePath)
+                else:
+                    LOG.debug("Did not find VMware DeployPkg Config File Path")
             else:
-                LOG.debug("Didn't find VMware DeployPkg Config File Path")
+                LOG.debug("Customization for VMware platform is disabled.")
 
         if vmwareImcConfigFilePath:
             try:
-- 
cgit v1.2.3


From ab6f166da7290928d56ff3c62a5280536e1d241f Mon Sep 17 00:00:00 2001
From: root <root@instance-16199.bigstep.io>
Date: Wed, 2 Mar 2016 08:51:16 +0000
Subject: Added Bigstep datasource.

---
 cloudinit/sources/DataSourceBigstep.py | 48 ++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)
 create mode 100644 cloudinit/sources/DataSourceBigstep.py

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
new file mode 100644
index 00000000..67d43eb3
--- /dev/null
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -0,0 +1,48 @@
+#
+#    Copyright (C) 2015-2016 Bigstep Cloud Ltd.
+#
+#    Author: Alexandru Sirbu <alexandru.sirbu@bigstep.com>
+#
+
+import json
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit import url_helper
+
+LOG = logging.getLogger(__name__)
+
+
+class DataSourceBigstep(sources.DataSource):
+    def __init__(self, sys_cfg, distro, paths):
+        sources.DataSource.__init__(self, sys_cfg, distro, paths)
+        self.metadata = {}
+        self.vendordata_raw = ""
+        self.userdata_raw = ""
+
+
+    def get_data(self, apply_filter=False):
+        url = get_url_from_file()
+        response = url_helper.readurl(url)
+        decoded = json.loads(response.contents)
+        self.metadata = decoded["metadata"]
+        self.vendordata_raw = decoded["vendordata_raw"]
+        self.userdata_raw = decoded["userdata_raw"]
+        return True
+
+
+def get_url_from_file():
+    content = util.load_file("/var/lib/cloud/data/seed/bigstep/url")
+    return content
+
+# Used to match classes to dependencies
+datasources = [
+    (DataSourceBigstep, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+    return sources.list_from_depends(depends, datasources)
+
-- 
cgit v1.2.3


From d5d89cfb1e61e6cc3f732a18ec1aa4d2b288489d Mon Sep 17 00:00:00 2001
From: root <root@instance-16199.bigstep.io>
Date: Wed, 2 Mar 2016 08:53:47 +0000
Subject: Pep8 changes to Bigstep datasource.

---
 cloudinit/sources/DataSourceBigstep.py | 2 --
 1 file changed, 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
index 67d43eb3..c22ffdb6 100644
--- a/cloudinit/sources/DataSourceBigstep.py
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -21,7 +21,6 @@ class DataSourceBigstep(sources.DataSource):
         self.vendordata_raw = ""
         self.userdata_raw = ""
 
-
     def get_data(self, apply_filter=False):
         url = get_url_from_file()
         response = url_helper.readurl(url)
@@ -45,4 +44,3 @@ datasources = [
 # Return a list of data sources that match this set of dependencies
 def get_datasource_list(depends):
     return sources.list_from_depends(depends, datasources)
-
-- 
cgit v1.2.3


From 603bdecc5aaa34043379aa4311c271d52dfe61e8 Mon Sep 17 00:00:00 2001
From: Alex Sirbu <alexandru.sirbu@bigstep.com>
Date: Wed, 2 Mar 2016 09:15:42 +0000
Subject: Added the hashed_passwd argument for the function create_user, which
 uses the already implemented functionality of changing the password with a
 hashed string, but which wasn't used anywhere.

---
 cloudinit/distros/__init__.py | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 8167c594..6778c93a 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -393,6 +393,10 @@ class Distro(object):
         if 'plain_text_passwd' in kwargs and kwargs['plain_text_passwd']:
             self.set_passwd(name, kwargs['plain_text_passwd'])
 
+        # Set password if hashed password is provided and non-empty
+        if 'hashed_passwd' in kwargs and kwargs['hashed_passwd']:
+            self.set_passwd(name, kwargs['hashed_passwd'], True)
+
         # Default locking down the account.  'lock_passwd' defaults to True.
         # lock account unless lock_password is False.
         if kwargs.get('lock_passwd', True):
-- 
cgit v1.2.3


From 921728d42731091f849d21dbef0920b84c559480 Mon Sep 17 00:00:00 2001
From: Alex Sirbu <alexandru.sirbu@bigstep.com>
Date: Wed, 2 Mar 2016 09:57:05 +0000
Subject: Used keyword for parameter in order to make it clearer what it
 represents.

---
 cloudinit/distros/__init__.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 6778c93a..fec18cd2 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -395,7 +395,7 @@ class Distro(object):
 
         # Set password if hashed password is provided and non-empty
         if 'hashed_passwd' in kwargs and kwargs['hashed_passwd']:
-            self.set_passwd(name, kwargs['hashed_passwd'], True)
+            self.set_passwd(name, kwargs['hashed_passwd'], hashed=True)
 
         # Default locking down the account.  'lock_passwd' defaults to True.
         # lock account unless lock_password is False.
-- 
cgit v1.2.3


From 568d15d7fb239e609fb70cc7c7a08205e640bf25 Mon Sep 17 00:00:00 2001
From: Ryan Harper <ryan.harper@canonical.com>
Date: Wed, 2 Mar 2016 13:23:55 -0600
Subject: Fix logic error in lxd config check

If the cloud-config does not contain an lxd dictionary then we should not
attempt to install the package.  Change the latter half of the check to
negate the dictionary type check.  This fix prevents us from always installing
lxd, rather than only installing when we have a config.

Fix pyflakes check on init_cfg dict error message.
---
 cloudinit/config/cc_lxd.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index aaafb643..7d8a0202 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -38,7 +38,7 @@ from cloudinit import util
 def handle(name, cfg, cloud, log, args):
     # Get config
     lxd_cfg = cfg.get('lxd')
-    if not lxd_cfg and isinstance(lxd_cfg, dict):
+    if not lxd_cfg and not isinstance(lxd_cfg, dict):
         log.debug("Skipping module named %s, not present or disabled by cfg")
         return
 
@@ -59,7 +59,7 @@ def handle(name, cfg, cloud, log, args):
     if init_cfg:
         if not isinstance(init_cfg, dict):
             log.warn("lxd/init config must be a dictionary. found a '%s'",
-                      type(f))
+                      type(init_cfg))
             return
         cmd = ['lxd', 'init', '--auto']
         for k in init_keys:
-- 
cgit v1.2.3


From c496b6a11d504ef62371cb5e03ac80b4ceb37540 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 3 Mar 2016 12:20:48 -0500
Subject: run pyflakes in more places, fix fallout

this makes 'make' run pyflakes, so failures there will stop a build.
also adds it to tox.
---
 Makefile                                                 | 6 ++++--
 cloudinit/sources/DataSourceOVF.py                       | 3 ++-
 cloudinit/sources/helpers/vmware/imc/config_nic.py       | 1 -
 cloudinit/util.py                                        | 2 +-
 tests/unittests/test_datasource/test_azure_helper.py     | 2 --
 tests/unittests/test_datasource/test_smartos.py          | 1 -
 tests/unittests/test_handler/test_handler_power_state.py | 2 +-
 tox.ini                                                  | 6 +++++-
 8 files changed, 13 insertions(+), 10 deletions(-)

(limited to 'cloudinit')

diff --git a/Makefile b/Makefile
index bb0c5253..8987d51c 100644
--- a/Makefile
+++ b/Makefile
@@ -14,13 +14,15 @@ ifeq ($(distro),)
   distro = redhat
 endif
 
-all: test check_version
+all: check
+
+check: test check_version pyflakes
 
 pep8:
 	@$(CWD)/tools/run-pep8 $(PY_FILES)
 
 pyflakes:
-	@$(CWD)/tools/tox-venv py34 pyflakes $(PY_FILES)
+	@pyflakes $(PY_FILES)
 
 pip-requirements:
 	@echo "Installing cloud-init dependencies..."
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 72ba5aba..d12601a4 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -90,7 +90,8 @@ class DataSourceOVF(sources.DataSource):
                 nicConfigurator.configure()
                 vmwarePlatformFound = True
             except Exception as inst:
-                LOG.debug("Error while parsing the Customization Config File")
+                LOG.debug("Error while parsing the Customization "
+                          "Config File: %s", inst)
             finally:
                 dirPath = os.path.dirname(vmwareImcConfigFilePath)
                 shutil.rmtree(dirPath)
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
index 172a1649..6d721134 100644
--- a/cloudinit/sources/helpers/vmware/imc/config_nic.py
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -19,7 +19,6 @@
 
 import logging
 import os
-import subprocess
 import re
 
 from cloudinit import util
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 45d49e66..0a639bb9 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2147,7 +2147,7 @@ def _read_dmi_syspath(key):
         LOG.debug("dmi data %s returned %s", dmi_key_path, key_data)
         return key_data.strip()
 
-    except Exception as e:
+    except Exception:
         logexc(LOG, "failed read of %s", dmi_key_path)
         return None
 
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 8dbdfb0b..1134199b 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -1,6 +1,4 @@
 import os
-import struct
-import unittest
 
 from cloudinit.sources.helpers import azure as azure_helper
 from ..helpers import TestCase
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 1235436d..ccb9f080 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -31,7 +31,6 @@ import shutil
 import stat
 import tempfile
 import uuid
-import unittest
 from binascii import crc32
 
 import serial
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py
index 5687b10d..cd376e9c 100644
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ b/tests/unittests/test_handler/test_handler_power_state.py
@@ -107,7 +107,7 @@ def check_lps_ret(psc_return, mode=None):
     if 'shutdown' not in psc_return[0][0]:
         errs.append("string 'shutdown' not in cmd")
 
-    if 'condition' is None:
+    if condition is None:
         errs.append("condition was not returned")
 
     if mode is not None:
diff --git a/tox.ini b/tox.ini
index b72df0c9..fd65f6ef 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = py27,py3
+envlist = py27,py3,pyflakes
 recreate = True
 
 [testenv]
@@ -10,6 +10,10 @@ deps = -r{toxinidir}/test-requirements.txt
 [testenv:py3]
 basepython = python3
 
+[testenv:pyflakes]
+basepython = python3
+commands = {envpython} -m pyflakes {posargs:cloudinit/ tests/ tools/}
+
 # https://github.com/gabrielfalcao/HTTPretty/issues/223
 setenv =
     LC_ALL = en_US.utf-8
-- 
cgit v1.2.3


From 96f1742b36241cee152aa2cb5b4a5e1a267a4770 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 3 Mar 2016 15:17:24 -0500
Subject: fix lxd module to not do anything unless config provided

---
 cloudinit/config/cc_lxd.py                       | 30 ++++++++++++------------
 tests/unittests/test_handler/test_handler_lxd.py | 16 +++++++++++++
 2 files changed, 31 insertions(+), 15 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 84eec7a5..80a4d219 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -47,22 +47,24 @@ def handle(name, cfg, cloud, log, args):
         return
 
     init_cfg = lxd_cfg.get('init')
-    if not init_cfg:
-        init_cfg = {}
-
     if not isinstance(init_cfg, dict):
         log.warn("lxd/init config must be a dictionary. found a '%s'",
                   type(init_cfg))
         init_cfg = {}
 
-    packages = []
-    if (init_cfg.get("storage_backend") == "zfs" and not util.which('zfs')):
-       packages.append('zfs')
+    if not init_cfg:
+        log.debug("no lxd/init config. disabled.")
+        return
 
+    packages = []
     # Ensure lxd is installed
     if not util.which("lxd"):
         packages.append('lxd')
-    
+
+    # if using zfs, get the utils
+    if (init_cfg.get("storage_backend") == "zfs" and not util.which('zfs')):
+        packages.append('zfs')
+
     if len(packages):
         try:
             cloud.distro.install_packages(packages)
@@ -75,11 +77,9 @@ def handle(name, cfg, cloud, log, args):
         'network_address', 'network_port', 'storage_backend',
         'storage_create_device', 'storage_create_loop',
         'storage_pool', 'trust_password')
-
-    if init_cfg:
-        cmd = ['lxd', 'init', '--auto']
-        for k in init_keys:
-            if init_cfg.get(k):
-                cmd.extend(["--%s=%s" %
-                            (k.replace('_', '-'), str(init_cfg[k]))])
-        util.subp(cmd)
+    cmd = ['lxd', 'init', '--auto']
+    for k in init_keys:
+        if init_cfg.get(k):
+            cmd.extend(["--%s=%s" %
+                        (k.replace('_', '-'), str(init_cfg[k]))])
+    util.subp(cmd)
diff --git a/tests/unittests/test_handler/test_handler_lxd.py b/tests/unittests/test_handler/test_handler_lxd.py
index 65794a41..7ffa2a53 100644
--- a/tests/unittests/test_handler/test_handler_lxd.py
+++ b/tests/unittests/test_handler/test_handler_lxd.py
@@ -57,3 +57,19 @@ class TestLxd(t_help.TestCase):
         self.assertTrue(cc.distro.install_packages.called)
         install_pkg = cc.distro.install_packages.call_args_list[0][0][0]
         self.assertEquals(sorted(install_pkg), ['lxd', 'zfs'])
+
+    @mock.patch("cloudinit.config.cc_lxd.util")
+    def test_no_init_does_nothing(self, mock_util):
+        cc = self._get_cloud('ubuntu')
+        cc.distro = mock.MagicMock()
+        cc_lxd.handle('cc_lxd', {'lxd': {}}, cc, LOG, [])
+        self.assertFalse(cc.distro.install_packages.called)
+        self.assertFalse(mock_util.subp.called)
+
+    @mock.patch("cloudinit.config.cc_lxd.util")
+    def test_no_lxd_does_nothing(self, mock_util):
+        cc = self._get_cloud('ubuntu')
+        cc.distro = mock.MagicMock()
+        cc_lxd.handle('cc_lxd', {'package_update': True}, cc, LOG, [])
+        self.assertFalse(cc.distro.install_packages.called)
+        self.assertFalse(mock_util.subp.called)
-- 
cgit v1.2.3


From cb64cf1e14a474794654f5d1586b117912bed4f9 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 3 Mar 2016 15:21:48 -0500
Subject: fix some of pylints complaints

---
 cloudinit/config/cc_lxd.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 80a4d219..63b8fb63 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -49,7 +49,7 @@ def handle(name, cfg, cloud, log, args):
     init_cfg = lxd_cfg.get('init')
     if not isinstance(init_cfg, dict):
         log.warn("lxd/init config must be a dictionary. found a '%s'",
-                  type(init_cfg))
+                 type(init_cfg))
         init_cfg = {}
 
     if not init_cfg:
@@ -62,14 +62,14 @@ def handle(name, cfg, cloud, log, args):
         packages.append('lxd')
 
     # if using zfs, get the utils
-    if (init_cfg.get("storage_backend") == "zfs" and not util.which('zfs')):
+    if init_cfg.get("storage_backend") == "zfs" and not util.which('zfs'):
         packages.append('zfs')
 
     if len(packages):
         try:
             cloud.distro.install_packages(packages)
-        except util.ProcessExecutionError as e:
-            log.warn("failed to install packages %s: %s", packages, e)
+        except util.ProcessExecutionError as exc:
+            log.warn("failed to install packages %s: %s", packages, exc)
             return
 
     # Set up lxd if init config is given
-- 
cgit v1.2.3


From 8cb7c3f7b5339e686bfbf95996b51afafeaf9c9e Mon Sep 17 00:00:00 2001
From: Ryan Harper <ryan.harper@canonical.com>
Date: Thu, 3 Mar 2016 16:20:10 -0600
Subject: Update pep8 runner and fix pep8 issues

---
 Makefile                                           |  9 ++--
 bin/cloud-init                                     | 43 +++++++++---------
 cloudinit/config/cc_apt_configure.py               |  6 ++-
 cloudinit/config/cc_disk_setup.py                  | 31 +++++++------
 cloudinit/config/cc_grub_dpkg.py                   |  8 ++--
 cloudinit/config/cc_keys_to_console.py             |  2 +-
 cloudinit/config/cc_lxd.py                         |  2 +-
 cloudinit/config/cc_mounts.py                      | 12 ++---
 cloudinit/config/cc_power_state_change.py          |  2 +-
 cloudinit/config/cc_puppet.py                      |  6 +--
 cloudinit/config/cc_resizefs.py                    |  2 +-
 cloudinit/config/cc_rh_subscription.py             |  4 +-
 cloudinit/config/cc_set_hostname.py                |  2 +-
 cloudinit/config/cc_ssh.py                         |  7 +--
 cloudinit/config/cc_update_etc_hosts.py            |  6 +--
 cloudinit/config/cc_update_hostname.py             |  2 +-
 cloudinit/config/cc_yum_add_repo.py                |  2 +-
 cloudinit/distros/__init__.py                      | 12 ++---
 cloudinit/distros/arch.py                          |  6 +--
 cloudinit/distros/debian.py                        |  5 ++-
 cloudinit/distros/freebsd.py                       |  4 +-
 cloudinit/distros/gentoo.py                        |  4 +-
 cloudinit/distros/parsers/hostname.py              |  2 +-
 cloudinit/distros/parsers/resolv_conf.py           |  2 +-
 cloudinit/distros/parsers/sys_conf.py              |  7 ++-
 cloudinit/filters/launch_index.py                  |  2 +-
 cloudinit/helpers.py                               |  7 +--
 cloudinit/sources/DataSourceAzure.py               | 21 +++++----
 cloudinit/sources/DataSourceConfigDrive.py         |  2 +-
 cloudinit/sources/DataSourceEc2.py                 | 10 ++---
 cloudinit/sources/DataSourceMAAS.py                | 15 ++++---
 cloudinit/sources/DataSourceOVF.py                 |  4 +-
 cloudinit/sources/DataSourceOpenNebula.py          |  3 +-
 cloudinit/sources/DataSourceSmartOS.py             |  7 ++-
 cloudinit/ssh_util.py                              |  3 +-
 cloudinit/stages.py                                | 18 ++++----
 cloudinit/url_helper.py                            |  6 +--
 cloudinit/util.py                                  | 15 ++++---
 tests/unittests/test_data.py                       |  5 ++-
 tests/unittests/test_datasource/test_altcloud.py   | 23 +++++-----
 tests/unittests/test_datasource/test_azure.py      | 15 ++++---
 .../unittests/test_datasource/test_configdrive.py  | 12 ++---
 tests/unittests/test_datasource/test_maas.py       | 16 +++----
 tests/unittests/test_datasource/test_smartos.py    |  6 +--
 .../test_handler/test_handler_power_state.py       |  3 +-
 .../test_handler/test_handler_seed_random.py       |  3 +-
 .../unittests/test_handler/test_handler_snappy.py  |  3 +-
 tests/unittests/test_sshutil.py                    |  3 +-
 tests/unittests/test_templating.py                 |  3 +-
 tools/hacking.py                                   | 16 +++----
 tools/mock-meta.py                                 | 27 +++++++-----
 tools/run-pep8                                     | 51 ++++++++--------------
 52 files changed, 244 insertions(+), 243 deletions(-)

(limited to 'cloudinit')

diff --git a/Makefile b/Makefile
index 058ac199..fb65b70b 100644
--- a/Makefile
+++ b/Makefile
@@ -20,13 +20,14 @@ all: test check_version
 check: pep8 pyflakes pyflakes3 unittest
 
 pep8:
-	@$(CWD)/tools/run-pep8 $(PY_FILES)
+	@$(CWD)/tools/run-pep8
 
 pyflakes:
-	@$(CWD)/tools/tox-venv py27 pyflakes $(PY_FILES)
+	@$(CWD)/tools/run-pyflakes
 
-pyflakes:
-	@$(CWD)/tools/tox-venv py34 pyflakes $(PY_FILES)
+pyflakes3:
+	@$(CWD)/tools/run-pyflakes3
+	
 
 unittest:
 	nosetests $(noseopts) tests/unittests
diff --git a/bin/cloud-init b/bin/cloud-init
index 9b90c45e..7f665e7e 100755
--- a/bin/cloud-init
+++ b/bin/cloud-init
@@ -194,7 +194,7 @@ def main_init(name, args):
     if args.debug:
         # Reset so that all the debug handlers are closed out
         LOG.debug(("Logging being reset, this logger may no"
-                    " longer be active shortly"))
+                   " longer be active shortly"))
         logging.resetLogging()
     logging.setupLogging(init.cfg)
     apply_reporting_cfg(init.cfg)
@@ -276,9 +276,9 @@ def main_init(name, args):
         # This may run user-data handlers and/or perform
         # url downloads and such as needed.
         (ran, _results) = init.cloudify().run('consume_data',
-                                             init.consume_data,
-                                             args=[PER_INSTANCE],
-                                             freq=PER_INSTANCE)
+                                              init.consume_data,
+                                              args=[PER_INSTANCE],
+                                              freq=PER_INSTANCE)
         if not ran:
             # Just consume anything that is set to run per-always
             # if nothing ran in the per-instance code
@@ -349,7 +349,7 @@ def main_modules(action_name, args):
     if args.debug:
         # Reset so that all the debug handlers are closed out
         LOG.debug(("Logging being reset, this logger may no"
-                    " longer be active shortly"))
+                   " longer be active shortly"))
         logging.resetLogging()
     logging.setupLogging(mods.cfg)
     apply_reporting_cfg(init.cfg)
@@ -534,7 +534,8 @@ def status_wrapper(name, args, data_d=None, link_d=None):
                 errors.extend(v1[m].get('errors', []))
 
         atomic_write_json(result_path,
-            {'v1': {'datasource': v1['datasource'], 'errors': errors}})
+                          {'v1': {'datasource': v1['datasource'],
+                                  'errors': errors}})
         util.sym_link(os.path.relpath(result_path, link_d), result_link,
                       force=True)
 
@@ -578,13 +579,13 @@ def main():
 
     # These settings are used for the 'config' and 'final' stages
     parser_mod = subparsers.add_parser('modules',
-                                      help=('activates modules '
-                                            'using a given configuration key'))
+                                       help=('activates modules using '
+                                             'a given configuration key'))
     parser_mod.add_argument("--mode", '-m', action='store',
-                             help=("module configuration name "
-                                    "to use (default: %(default)s)"),
-                             default='config',
-                             choices=('init', 'config', 'final'))
+                            help=("module configuration name "
+                                  "to use (default: %(default)s)"),
+                            default='config',
+                            choices=('init', 'config', 'final'))
     parser_mod.set_defaults(action=('modules', main_modules))
 
     # These settings are used when you want to query information
@@ -600,22 +601,22 @@ def main():
 
     # This subcommand allows you to run a single module
     parser_single = subparsers.add_parser('single',
-                                         help=('run a single module '))
+                                          help=('run a single module '))
     parser_single.set_defaults(action=('single', main_single))
     parser_single.add_argument("--name", '-n', action="store",
-                              help="module name to run",
-                              required=True)
+                               help="module name to run",
+                               required=True)
     parser_single.add_argument("--frequency", action="store",
-                              help=("frequency of the module"),
-                              required=False,
-                              choices=list(FREQ_SHORT_NAMES.keys()))
+                               help=("frequency of the module"),
+                               required=False,
+                               choices=list(FREQ_SHORT_NAMES.keys()))
     parser_single.add_argument("--report", action="store_true",
                                help="enable reporting",
                                required=False)
     parser_single.add_argument("module_args", nargs="*",
-                              metavar='argument',
-                              help=('any additional arguments to'
-                                    ' pass to this module'))
+                               metavar='argument',
+                               help=('any additional arguments to'
+                                     ' pass to this module'))
     parser_single.set_defaults(action=('single', main_single))
 
     args = parser.parse_args()
diff --git a/cloudinit/config/cc_apt_configure.py b/cloudinit/config/cc_apt_configure.py
index 9e9e9e26..702977cb 100644
--- a/cloudinit/config/cc_apt_configure.py
+++ b/cloudinit/config/cc_apt_configure.py
@@ -91,7 +91,8 @@ def handle(name, cfg, cloud, log, _args):
         if matchcfg:
             matcher = re.compile(matchcfg).search
         else:
-            matcher = lambda f: False
+            def matcher(x):
+                return False
 
         errors = add_sources(cfg['apt_sources'], params,
                              aa_repo_match=matcher)
@@ -173,7 +174,8 @@ def add_sources(srclist, template_params=None, aa_repo_match=None):
         template_params = {}
 
     if aa_repo_match is None:
-        aa_repo_match = lambda f: False
+        def aa_repo_match(x):
+            return False
 
     errorlist = []
     for ent in srclist:
diff --git a/cloudinit/config/cc_disk_setup.py b/cloudinit/config/cc_disk_setup.py
index d5b0d1d7..0ecc2e4c 100644
--- a/cloudinit/config/cc_disk_setup.py
+++ b/cloudinit/config/cc_disk_setup.py
@@ -167,11 +167,12 @@ def enumerate_disk(device, nodeps=False):
     parts = [x for x in (info.strip()).splitlines() if len(x.split()) > 0]
 
     for part in parts:
-        d = {'name': None,
-             'type': None,
-             'fstype': None,
-             'label': None,
-            }
+        d = {
+            'name': None,
+            'type': None,
+            'fstype': None,
+            'label': None,
+        }
 
         for key, value in value_splitter(part):
             d[key.lower()] = value
@@ -701,11 +702,12 @@ def lookup_force_flag(fs):
     """
     A force flag might be -F or -F, this look it up
     """
-    flags = {'ext': '-F',
-             'btrfs': '-f',
-             'xfs': '-f',
-             'reiserfs': '-f',
-            }
+    flags = {
+        'ext': '-F',
+        'btrfs': '-f',
+        'xfs': '-f',
+        'reiserfs': '-f',
+    }
 
     if 'ext' in fs.lower():
         fs = 'ext'
@@ -824,10 +826,11 @@ def mkfs(fs_cfg):
 
     # Create the commands
     if fs_cmd:
-        fs_cmd = fs_cfg['cmd'] % {'label': label,
-                                  'filesystem': fs_type,
-                                  'device': device,
-                                 }
+        fs_cmd = fs_cfg['cmd'] % {
+            'label': label,
+            'filesystem': fs_type,
+            'device': device,
+        }
     else:
         # Find the mkfs command
         mkfs_cmd = util.which("mkfs.%s" % fs_type)
diff --git a/cloudinit/config/cc_grub_dpkg.py b/cloudinit/config/cc_grub_dpkg.py
index 456597af..acd3e60a 100644
--- a/cloudinit/config/cc_grub_dpkg.py
+++ b/cloudinit/config/cc_grub_dpkg.py
@@ -38,11 +38,11 @@ def handle(name, cfg, _cloud, log, _args):
 
     idevs = util.get_cfg_option_str(mycfg, "grub-pc/install_devices", None)
     idevs_empty = util.get_cfg_option_str(mycfg,
-        "grub-pc/install_devices_empty", None)
+                                          "grub-pc/install_devices_empty",
+                                          None)
 
     if ((os.path.exists("/dev/sda1") and not os.path.exists("/dev/sda")) or
-            (os.path.exists("/dev/xvda1")
-            and not os.path.exists("/dev/xvda"))):
+       (os.path.exists("/dev/xvda1") and not os.path.exists("/dev/xvda"))):
         if idevs is None:
             idevs = ""
         if idevs_empty is None:
@@ -66,7 +66,7 @@ def handle(name, cfg, _cloud, log, _args):
                  (idevs, idevs_empty))
 
     log.debug("Setting grub debconf-set-selections with '%s','%s'" %
-        (idevs, idevs_empty))
+              (idevs, idevs_empty))
 
     try:
         util.subp(['debconf-set-selections'], dconf_sel)
diff --git a/cloudinit/config/cc_keys_to_console.py b/cloudinit/config/cc_keys_to_console.py
index f1c1adff..aa844ee9 100644
--- a/cloudinit/config/cc_keys_to_console.py
+++ b/cloudinit/config/cc_keys_to_console.py
@@ -48,7 +48,7 @@ def handle(name, cfg, cloud, log, _args):
                                             "ssh_fp_console_blacklist", [])
     key_blacklist = util.get_cfg_option_list(cfg,
                                              "ssh_key_console_blacklist",
-                                              ["ssh-dss"])
+                                             ["ssh-dss"])
 
     try:
         cmd = [helper_path]
diff --git a/cloudinit/config/cc_lxd.py b/cloudinit/config/cc_lxd.py
index 7d8a0202..e2fdf68e 100644
--- a/cloudinit/config/cc_lxd.py
+++ b/cloudinit/config/cc_lxd.py
@@ -59,7 +59,7 @@ def handle(name, cfg, cloud, log, args):
     if init_cfg:
         if not isinstance(init_cfg, dict):
             log.warn("lxd/init config must be a dictionary. found a '%s'",
-                      type(init_cfg))
+                     type(init_cfg))
             return
         cmd = ['lxd', 'init', '--auto']
         for k in init_keys:
diff --git a/cloudinit/config/cc_mounts.py b/cloudinit/config/cc_mounts.py
index 11089d8d..4fe3ee21 100644
--- a/cloudinit/config/cc_mounts.py
+++ b/cloudinit/config/cc_mounts.py
@@ -204,12 +204,12 @@ def setup_swapfile(fname, size=None, maxsize=None):
     try:
         util.ensure_dir(tdir)
         util.log_time(LOG.debug, msg, func=util.subp,
-            args=[['sh', '-c',
-                   ('rm -f "$1" && umask 0066 && '
-                    '{ fallocate -l "${2}M" "$1" || '
-                    '  dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
-                    'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
-                   'setup_swap', fname, mbsize]])
+                      args=[['sh', '-c',
+                            ('rm -f "$1" && umask 0066 && '
+                             '{ fallocate -l "${2}M" "$1" || '
+                             ' dd if=/dev/zero "of=$1" bs=1M "count=$2"; } && '
+                             'mkswap "$1" || { r=$?; rm -f "$1"; exit $r; }'),
+                             'setup_swap', fname, mbsize]])
 
     except Exception as e:
         raise IOError("Failed %s: %s" % (msg, e))
diff --git a/cloudinit/config/cc_power_state_change.py b/cloudinit/config/cc_power_state_change.py
index 7d9567e3..cc3f7f70 100644
--- a/cloudinit/config/cc_power_state_change.py
+++ b/cloudinit/config/cc_power_state_change.py
@@ -105,7 +105,7 @@ def handle(_name, cfg, _cloud, log, _args):
 
     log.debug("After pid %s ends, will execute: %s" % (mypid, ' '.join(args)))
 
-    util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log, 
+    util.fork_cb(run_after_pid_gone, mypid, cmdline, timeout, log,
                  condition, execmd, [args, devnull_fp])
 
 
diff --git a/cloudinit/config/cc_puppet.py b/cloudinit/config/cc_puppet.py
index 4501598e..774d3322 100644
--- a/cloudinit/config/cc_puppet.py
+++ b/cloudinit/config/cc_puppet.py
@@ -36,8 +36,8 @@ def _autostart_puppet(log):
     # Set puppet to automatically start
     if os.path.exists('/etc/default/puppet'):
         util.subp(['sed', '-i',
-                  '-e', 's/^START=.*/START=yes/',
-                  '/etc/default/puppet'], capture=False)
+                   '-e', 's/^START=.*/START=yes/',
+                   '/etc/default/puppet'], capture=False)
     elif os.path.exists('/bin/systemctl'):
         util.subp(['/bin/systemctl', 'enable', 'puppet.service'],
                   capture=False)
@@ -65,7 +65,7 @@ def handle(name, cfg, cloud, log, _args):
                   " doing nothing."))
     elif install:
         log.debug(("Attempting to install puppet %s,"),
-                   version if version else 'latest')
+                  version if version else 'latest')
         cloud.distro.install_packages(('puppet', version))
 
     # ... and then update the puppet configuration
diff --git a/cloudinit/config/cc_resizefs.py b/cloudinit/config/cc_resizefs.py
index cbc07853..2a2a9f59 100644
--- a/cloudinit/config/cc_resizefs.py
+++ b/cloudinit/config/cc_resizefs.py
@@ -166,7 +166,7 @@ def handle(name, cfg, _cloud, log, args):
             func=do_resize, args=(resize_cmd, log))
     else:
         util.log_time(logfunc=log.debug, msg="Resizing",
-            func=do_resize, args=(resize_cmd, log))
+                      func=do_resize, args=(resize_cmd, log))
 
     action = 'Resized'
     if resize_root == NOBLOCK:
diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 3b30c47e..6f474aed 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -127,8 +127,8 @@ class SubscriptionManager(object):
             return False, not_bool
 
         if (self.servicelevel is not None) and \
-                ((not self.auto_attach)
-                 or (util.is_false(str(self.auto_attach)))):
+           ((not self.auto_attach) or
+           (util.is_false(str(self.auto_attach)))):
 
             no_auto = ("The service-level key must be used in conjunction "
                        "with the auto-attach key.  Please re-run with "
diff --git a/cloudinit/config/cc_set_hostname.py b/cloudinit/config/cc_set_hostname.py
index 5d7f4331..f43d8d5a 100644
--- a/cloudinit/config/cc_set_hostname.py
+++ b/cloudinit/config/cc_set_hostname.py
@@ -24,7 +24,7 @@ from cloudinit import util
 def handle(name, cfg, cloud, log, _args):
     if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
         log.debug(("Configuration option 'preserve_hostname' is set,"
-                    " not setting the hostname in module %s"), name)
+                   " not setting the hostname in module %s"), name)
         return
 
     (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
diff --git a/cloudinit/config/cc_ssh.py b/cloudinit/config/cc_ssh.py
index 5bd2dec6..d24e43c0 100644
--- a/cloudinit/config/cc_ssh.py
+++ b/cloudinit/config/cc_ssh.py
@@ -30,9 +30,10 @@ from cloudinit import distros as ds
 from cloudinit import ssh_util
 from cloudinit import util
 
-DISABLE_ROOT_OPTS = ("no-port-forwarding,no-agent-forwarding,"
-"no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\" "
-"rather than the user \\\"root\\\".\';echo;sleep 10\"")
+DISABLE_ROOT_OPTS = (
+    "no-port-forwarding,no-agent-forwarding,"
+    "no-X11-forwarding,command=\"echo \'Please login as the user \\\"$USER\\\""
+    " rather than the user \\\"root\\\".\';echo;sleep 10\"")
 
 GENERATE_KEY_NAMES = ['rsa', 'dsa', 'ecdsa', 'ed25519']
 KEY_FILE_TPL = '/etc/ssh/ssh_host_%s_key'
diff --git a/cloudinit/config/cc_update_etc_hosts.py b/cloudinit/config/cc_update_etc_hosts.py
index d3dd1f32..15703efe 100644
--- a/cloudinit/config/cc_update_etc_hosts.py
+++ b/cloudinit/config/cc_update_etc_hosts.py
@@ -41,10 +41,10 @@ def handle(name, cfg, cloud, log, _args):
         if not tpl_fn_name:
             raise RuntimeError(("No hosts template could be"
                                 " found for distro %s") %
-                                (cloud.distro.osfamily))
+                               (cloud.distro.osfamily))
 
         templater.render_to_file(tpl_fn_name, '/etc/hosts',
-                                {'hostname': hostname, 'fqdn': fqdn})
+                                 {'hostname': hostname, 'fqdn': fqdn})
 
     elif manage_hosts == "localhost":
         (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
@@ -57,4 +57,4 @@ def handle(name, cfg, cloud, log, _args):
         cloud.distro.update_etc_hosts(hostname, fqdn)
     else:
         log.debug(("Configuration option 'manage_etc_hosts' is not set,"
-                    " not managing /etc/hosts in module %s"), name)
+                   " not managing /etc/hosts in module %s"), name)
diff --git a/cloudinit/config/cc_update_hostname.py b/cloudinit/config/cc_update_hostname.py
index e396ba13..5b78afe1 100644
--- a/cloudinit/config/cc_update_hostname.py
+++ b/cloudinit/config/cc_update_hostname.py
@@ -29,7 +29,7 @@ frequency = PER_ALWAYS
 def handle(name, cfg, cloud, log, _args):
     if util.get_cfg_option_bool(cfg, "preserve_hostname", False):
         log.debug(("Configuration option 'preserve_hostname' is set,"
-                    " not updating the hostname in module %s"), name)
+                   " not updating the hostname in module %s"), name)
         return
 
     (hostname, fqdn) = util.get_hostname_fqdn(cfg, cloud)
diff --git a/cloudinit/config/cc_yum_add_repo.py b/cloudinit/config/cc_yum_add_repo.py
index 3b821af9..64fba869 100644
--- a/cloudinit/config/cc_yum_add_repo.py
+++ b/cloudinit/config/cc_yum_add_repo.py
@@ -92,7 +92,7 @@ def handle(name, cfg, _cloud, log, _args):
         for req_field in ['baseurl']:
             if req_field not in repo_config:
                 log.warn(("Repository %s does not contain a %s"
-                           " configuration 'required' entry"),
+                          " configuration 'required' entry"),
                          repo_id, req_field)
                 missing_required += 1
         if not missing_required:
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index 71884b32..661a9fd2 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -211,8 +211,8 @@ class Distro(object):
 
         # If the system hostname is different than the previous
         # one or the desired one lets update it as well
-        if (not sys_hostname) or (sys_hostname == prev_hostname
-                                  and sys_hostname != hostname):
+        if ((not sys_hostname) or (sys_hostname == prev_hostname and
+           sys_hostname != hostname)):
             update_files.append(sys_fn)
 
         # If something else has changed the hostname after we set it
@@ -221,7 +221,7 @@ class Distro(object):
         if (sys_hostname and prev_hostname and
                 sys_hostname != prev_hostname):
             LOG.info("%s differs from %s, assuming user maintained hostname.",
-                       prev_hostname_fn, sys_fn)
+                     prev_hostname_fn, sys_fn)
             return
 
         # Remove duplicates (incase the previous config filename)
@@ -289,7 +289,7 @@ class Distro(object):
     def _bring_up_interface(self, device_name):
         cmd = ['ifup', device_name]
         LOG.debug("Attempting to run bring up interface %s using command %s",
-                   device_name, cmd)
+                  device_name, cmd)
         try:
             (_out, err) = util.subp(cmd)
             if len(err):
@@ -548,7 +548,7 @@ class Distro(object):
             for member in members:
                 if not util.is_user(member):
                     LOG.warn("Unable to add group member '%s' to group '%s'"
-                            "; user does not exist.", member, name)
+                             "; user does not exist.", member, name)
                     continue
 
                 util.subp(['usermod', '-a', '-G', name, member])
@@ -886,7 +886,7 @@ def fetch(name):
     locs, looked_locs = importer.find_module(name, ['', __name__], ['Distro'])
     if not locs:
         raise ImportError("No distribution found for distro %s (searched %s)"
-                           % (name, looked_locs))
+                          % (name, looked_locs))
     mod = importer.import_module(locs[0])
     cls = getattr(mod, 'Distro')
     return cls
diff --git a/cloudinit/distros/arch.py b/cloudinit/distros/arch.py
index 45fcf26f..93a2e008 100644
--- a/cloudinit/distros/arch.py
+++ b/cloudinit/distros/arch.py
@@ -74,7 +74,7 @@ class Distro(distros.Distro):
                 'Interface': dev,
                 'IP': info.get('bootproto'),
                 'Address': "('%s/%s')" % (info.get('address'),
-                        info.get('netmask')),
+                                          info.get('netmask')),
                 'Gateway': info.get('gateway'),
                 'DNS': str(tuple(info.get('dns-nameservers'))).replace(',', '')
             }
@@ -86,7 +86,7 @@ class Distro(distros.Distro):
 
         if nameservers:
             util.write_file(self.resolve_conf_fn,
-                    convert_resolv_conf(nameservers))
+                            convert_resolv_conf(nameservers))
 
         return dev_names
 
@@ -102,7 +102,7 @@ class Distro(distros.Distro):
     def _bring_up_interface(self, device_name):
         cmd = ['netctl', 'restart', device_name]
         LOG.debug("Attempting to run bring up interface %s using command %s",
-                   device_name, cmd)
+                  device_name, cmd)
         try:
             (_out, err) = util.subp(cmd)
             if len(err):
diff --git a/cloudinit/distros/debian.py b/cloudinit/distros/debian.py
index 6d3a82bf..db5890b1 100644
--- a/cloudinit/distros/debian.py
+++ b/cloudinit/distros/debian.py
@@ -159,8 +159,9 @@ class Distro(distros.Distro):
 
         # Allow the output of this to flow outwards (ie not be captured)
         util.log_time(logfunc=LOG.debug,
-            msg="apt-%s [%s]" % (command, ' '.join(cmd)), func=util.subp,
-            args=(cmd,), kwargs={'env': e, 'capture': False})
+                      msg="apt-%s [%s]" % (command, ' '.join(cmd)),
+                      func=util.subp,
+                      args=(cmd,), kwargs={'env': e, 'capture': False})
 
     def update_package_sources(self):
         self._runner.run("update-sources", self.package_command,
diff --git a/cloudinit/distros/freebsd.py b/cloudinit/distros/freebsd.py
index 4c484639..72012056 100644
--- a/cloudinit/distros/freebsd.py
+++ b/cloudinit/distros/freebsd.py
@@ -205,8 +205,8 @@ class Distro(distros.Distro):
         redact_opts = ['passwd']
 
         for key, val in kwargs.items():
-            if (key in adduser_opts and val
-                    and isinstance(val, six.string_types)):
+            if (key in adduser_opts and val and
+               isinstance(val, six.string_types)):
                 adduser_cmd.extend([adduser_opts[key], val])
 
                 # Redact certain fields from the logs
diff --git a/cloudinit/distros/gentoo.py b/cloudinit/distros/gentoo.py
index 9e80583c..6267dd6e 100644
--- a/cloudinit/distros/gentoo.py
+++ b/cloudinit/distros/gentoo.py
@@ -66,7 +66,7 @@ class Distro(distros.Distro):
     def _bring_up_interface(self, device_name):
         cmd = ['/etc/init.d/net.%s' % device_name, 'restart']
         LOG.debug("Attempting to run bring up interface %s using command %s",
-                   device_name, cmd)
+                  device_name, cmd)
         try:
             (_out, err) = util.subp(cmd)
             if len(err):
@@ -88,7 +88,7 @@ class Distro(distros.Distro):
                 (_out, err) = util.subp(cmd)
                 if len(err):
                     LOG.warn("Running %s resulted in stderr output: %s", cmd,
-                            err)
+                             err)
             except util.ProcessExecutionError:
                 util.logexc(LOG, "Running interface command %s failed", cmd)
                 return False
diff --git a/cloudinit/distros/parsers/hostname.py b/cloudinit/distros/parsers/hostname.py
index 84a1de42..efb185d4 100644
--- a/cloudinit/distros/parsers/hostname.py
+++ b/cloudinit/distros/parsers/hostname.py
@@ -84,5 +84,5 @@ class HostnameConf(object):
             hostnames_found.add(head)
         if len(hostnames_found) > 1:
             raise IOError("Multiple hostnames (%s) found!"
-                           % (hostnames_found))
+                          % (hostnames_found))
         return entries
diff --git a/cloudinit/distros/parsers/resolv_conf.py b/cloudinit/distros/parsers/resolv_conf.py
index 8aee03a4..2ed13d9c 100644
--- a/cloudinit/distros/parsers/resolv_conf.py
+++ b/cloudinit/distros/parsers/resolv_conf.py
@@ -132,7 +132,7 @@ class ResolvConf(object):
             # Some hard limit on 256 chars total
             raise ValueError(("Adding %r would go beyond the "
                               "256 maximum search list character limit")
-                              % (search_domain))
+                             % (search_domain))
         self._remove_option('search')
         self._contents.append(('option', ['search', s_list, '']))
         return flat_sds
diff --git a/cloudinit/distros/parsers/sys_conf.py b/cloudinit/distros/parsers/sys_conf.py
index d795e12f..6157cf32 100644
--- a/cloudinit/distros/parsers/sys_conf.py
+++ b/cloudinit/distros/parsers/sys_conf.py
@@ -77,8 +77,7 @@ class SysConf(configobj.ConfigObj):
         quot_func = None
         if value[0] in ['"', "'"] and value[-1] in ['"', "'"]:
             if len(value) == 1:
-                quot_func = (lambda x:
-                                self._get_single_quote(x) % x)
+                quot_func = (lambda x: self._get_single_quote(x) % x)
         else:
             # Quote whitespace if it isn't the start + end of a shell command
             if value.strip().startswith("$(") and value.strip().endswith(")"):
@@ -91,10 +90,10 @@ class SysConf(configobj.ConfigObj):
                         # to use single quotes which won't get expanded...
                         if re.search(r"[\n\"']", value):
                             quot_func = (lambda x:
-                                            self._get_triple_quote(x) % x)
+                                         self._get_triple_quote(x) % x)
                         else:
                             quot_func = (lambda x:
-                                            self._get_single_quote(x) % x)
+                                         self._get_single_quote(x) % x)
                     else:
                         quot_func = pipes.quote
         if not quot_func:
diff --git a/cloudinit/filters/launch_index.py b/cloudinit/filters/launch_index.py
index 5bebd318..baecdac9 100644
--- a/cloudinit/filters/launch_index.py
+++ b/cloudinit/filters/launch_index.py
@@ -61,7 +61,7 @@ class Filter(object):
                     discarded += 1
             LOG.debug(("Discarding %s multipart messages "
                        "which do not match launch index %s"),
-                       discarded, self.wanted_idx)
+                      discarded, self.wanted_idx)
             new_message = copy.copy(message)
             new_message.set_payload(new_msgs)
             new_message[ud.ATTACHMENT_FIELD] = str(len(new_msgs))
diff --git a/cloudinit/helpers.py b/cloudinit/helpers.py
index 5e99d185..a6eb20fe 100644
--- a/cloudinit/helpers.py
+++ b/cloudinit/helpers.py
@@ -139,9 +139,10 @@ class FileSemaphores(object):
         # but the item had run before we did canon_sem_name.
         if cname != name and os.path.exists(self._get_path(name, freq)):
             LOG.warn("%s has run without canonicalized name [%s].\n"
-                "likely the migrator has not yet run. It will run next boot.\n"
-                "run manually with: cloud-init single --name=migrator"
-                % (name, cname))
+                     "likely the migrator has not yet run. "
+                     "It will run next boot.\n"
+                     "run manually with: cloud-init single --name=migrator"
+                     % (name, cname))
             return True
 
         return False
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index bd80a8a6..b03ab895 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -38,7 +38,8 @@ LOG = logging.getLogger(__name__)
 DS_NAME = 'Azure'
 DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
 AGENT_START = ['service', 'walinuxagent', 'start']
-BOUNCE_COMMAND = ['sh', '-xc',
+BOUNCE_COMMAND = [
+    'sh', '-xc',
     "i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]
 
 BUILTIN_DS_CONFIG = {
@@ -91,9 +92,9 @@ def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
     """
     policy = cfg['hostname_bounce']['policy']
     previous_hostname = get_hostname(hostname_command)
-    if (not util.is_true(cfg.get('set_hostname'))
-            or util.is_false(policy)
-            or (previous_hostname == temp_hostname and policy != 'force')):
+    if (not util.is_true(cfg.get('set_hostname')) or
+       util.is_false(policy) or
+       (previous_hostname == temp_hostname and policy != 'force')):
         yield None
         return
     set_hostname(temp_hostname, hostname_command)
@@ -123,8 +124,8 @@ class DataSourceAzureNet(sources.DataSource):
         with temporary_hostname(temp_hostname, self.ds_cfg,
                                 hostname_command=hostname_command) \
                 as previous_hostname:
-            if (previous_hostname is not None
-                    and util.is_true(self.ds_cfg.get('set_hostname'))):
+            if (previous_hostname is not None and
+               util.is_true(self.ds_cfg.get('set_hostname'))):
                 cfg = self.ds_cfg['hostname_bounce']
                 try:
                     perform_hostname_bounce(hostname=temp_hostname,
@@ -152,7 +153,8 @@ class DataSourceAzureNet(sources.DataSource):
                 else:
                     bname = str(pk['fingerprint'] + ".crt")
                     fp_files += [os.path.join(ddir, bname)]
-                    LOG.debug("ssh authentication: using fingerprint from fabirc")
+                    LOG.debug("ssh authentication: "
+                              "using fingerprint from fabirc")
 
             missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
                                     func=wait_for_files,
@@ -506,7 +508,7 @@ def read_azure_ovf(contents):
         raise BrokenAzureDataSource("invalid xml: %s" % e)
 
     results = find_child(dom.documentElement,
-        lambda n: n.localName == "ProvisioningSection")
+                         lambda n: n.localName == "ProvisioningSection")
 
     if len(results) == 0:
         raise NonAzureDataSource("No ProvisioningSection")
@@ -516,7 +518,8 @@ def read_azure_ovf(contents):
     provSection = results[0]
 
     lpcs_nodes = find_child(provSection,
-        lambda n: n.localName == "LinuxProvisioningConfigurationSet")
+                            lambda n:
+                            n.localName == "LinuxProvisioningConfigurationSet")
 
     if len(results) == 0:
         raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index eb474079..e3916208 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -39,7 +39,7 @@ FS_TYPES = ('vfat', 'iso9660')
 LABEL_TYPES = ('config-2',)
 POSSIBLE_MOUNTS = ('sr', 'cd')
 OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
-                  for i in range(0, 2)))
+                        for i in range(0, 2)))
 
 
 class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 0032d06c..6a897f7d 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -61,12 +61,12 @@ class DataSourceEc2(sources.DataSource):
             if not self.wait_for_metadata_service():
                 return False
             start_time = time.time()
-            self.userdata_raw = ec2.get_instance_userdata(self.api_ver,
-                self.metadata_address)
+            self.userdata_raw = \
+                ec2.get_instance_userdata(self.api_ver, self.metadata_address)
             self.metadata = ec2.get_instance_metadata(self.api_ver,
                                                       self.metadata_address)
             LOG.debug("Crawl of metadata service took %s seconds",
-                       int(time.time() - start_time))
+                      int(time.time() - start_time))
             return True
         except Exception:
             util.logexc(LOG, "Failed reading from metadata address %s",
@@ -132,13 +132,13 @@ class DataSourceEc2(sources.DataSource):
 
         start_time = time.time()
         url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
-                                timeout=timeout, status_cb=LOG.warn)
+                                 timeout=timeout, status_cb=LOG.warn)
 
         if url:
             LOG.debug("Using metadata source: '%s'", url2base[url])
         else:
             LOG.critical("Giving up on md from %s after %s seconds",
-                            urls, int(time.time() - start_time))
+                         urls, int(time.time() - start_time))
 
         self.metadata_address = url2base.get(url)
         return bool(url)
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index cfc59ca5..f18c4cee 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -275,17 +275,18 @@ if __name__ == "__main__":
 
         parser = argparse.ArgumentParser(description='Interact with MAAS DS')
         parser.add_argument("--config", metavar="file",
-            help="specify DS config file", default=None)
+                            help="specify DS config file", default=None)
         parser.add_argument("--ckey", metavar="key",
-            help="the consumer key to auth with", default=None)
+                            help="the consumer key to auth with", default=None)
         parser.add_argument("--tkey", metavar="key",
-            help="the token key to auth with", default=None)
+                            help="the token key to auth with", default=None)
         parser.add_argument("--csec", metavar="secret",
-            help="the consumer secret (likely '')", default="")
+                            help="the consumer secret (likely '')", default="")
         parser.add_argument("--tsec", metavar="secret",
-            help="the token secret to auth with", default=None)
+                            help="the token secret to auth with", default=None)
         parser.add_argument("--apiver", metavar="version",
-            help="the apiver to use ("" can be used)", default=MD_VERSION)
+                            help="the apiver to use ("" can be used)",
+                            default=MD_VERSION)
 
         subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
         subcmds.add_parser('crawl', help="crawl the datasource")
@@ -297,7 +298,7 @@ if __name__ == "__main__":
         args = parser.parse_args()
 
         creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
-            'token_secret': args.tsec, 'consumer_secret': args.csec}
+                 'token_secret': args.tsec, 'consumer_secret': args.csec}
 
         if args.config:
             cfg = util.read_conf(args.config)
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 58a4b2a2..adf9b12e 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -264,14 +264,14 @@ def get_properties(contents):
     # could also check here that elem.namespaceURI ==
     #   "http://schemas.dmtf.org/ovf/environment/1"
     propSections = find_child(dom.documentElement,
-        lambda n: n.localName == "PropertySection")
+                              lambda n: n.localName == "PropertySection")
 
     if len(propSections) == 0:
         raise XmlError("No 'PropertySection's")
 
     props = {}
     propElems = find_child(propSections[0],
-                            (lambda n: n.localName == "Property"))
+                           (lambda n: n.localName == "Property"))
 
     for elem in propElems:
         key = elem.attributes.getNamedItemNS(envNsURI, "key").value
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index ac2c3b45..b26940d1 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -404,7 +404,8 @@ def read_context_disk_dir(source_dir, asuser=None):
     if ssh_key_var:
         lines = context.get(ssh_key_var).splitlines()
         results['metadata']['public-keys'] = [l for l in lines
-            if len(l) and not l.startswith("#")]
+                                              if len(l) and not
+                                              l.startswith("#")]
 
     # custom hostname -- try hostname or leave cloud-init
     # itself create hostname from IP address later
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 7453379a..139ee52c 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -90,8 +90,7 @@ BUILTIN_DS_CONFIG = {
                          'user-data',
                          'user-script',
                          'sdc:datacenter_name',
-                         'sdc:uuid',
-                        ],
+                         'sdc:uuid'],
     'base64_keys': [],
     'base64_all': False,
     'disk_aliases': {'ephemeral0': '/dev/vdb'},
@@ -450,7 +449,7 @@ class JoyentMetadataClient(object):
 
         response = bytearray()
         response.extend(self.metasource.read(1))
-        while response[-1:] !=  b'\n':
+        while response[-1:] != b'\n':
             response.extend(self.metasource.read(1))
         response = response.rstrip().decode('ascii')
         LOG.debug('Read "%s" from metadata transport.', response)
@@ -513,7 +512,7 @@ def write_boot_content(content, content_f, link=None, shebang=False,
 
         except Exception as e:
             util.logexc(LOG, ("Failed to identify script type for %s" %
-                             content_f, e))
+                              content_f, e))
 
     if link:
         try:
diff --git a/cloudinit/ssh_util.py b/cloudinit/ssh_util.py
index 9b2f5ed5..c74a7ae2 100644
--- a/cloudinit/ssh_util.py
+++ b/cloudinit/ssh_util.py
@@ -31,7 +31,8 @@ LOG = logging.getLogger(__name__)
 DEF_SSHD_CFG = "/etc/ssh/sshd_config"
 
 # taken from openssh source key.c/key_type_from_name
-VALID_KEY_TYPES = ("rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",
+VALID_KEY_TYPES = (
+    "rsa", "dsa", "ssh-rsa", "ssh-dss", "ecdsa",
     "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
     "ssh-rsa-cert-v00@openssh.com", "ssh-dss-cert-v00@openssh.com",
     "ssh-rsa-cert-v01@openssh.com", "ssh-dss-cert-v01@openssh.com",
diff --git a/cloudinit/stages.py b/cloudinit/stages.py
index 9f192c8d..dbcf3d55 100644
--- a/cloudinit/stages.py
+++ b/cloudinit/stages.py
@@ -509,13 +509,13 @@ class Init(object):
     def consume_data(self, frequency=PER_INSTANCE):
         # Consume the userdata first, because we need want to let the part
         # handlers run first (for merging stuff)
-        with events.ReportEventStack(
-            "consume-user-data", "reading and applying user-data",
-            parent=self.reporter):
+        with events.ReportEventStack("consume-user-data",
+                                     "reading and applying user-data",
+                                     parent=self.reporter):
                 self._consume_userdata(frequency)
-        with events.ReportEventStack(
-            "consume-vendor-data", "reading and applying vendor-data",
-            parent=self.reporter):
+        with events.ReportEventStack("consume-vendor-data",
+                                     "reading and applying vendor-data",
+                                     parent=self.reporter):
                 self._consume_vendordata(frequency)
 
         # Perform post-consumption adjustments so that
@@ -655,7 +655,7 @@ class Modules(object):
             else:
                 raise TypeError(("Failed to read '%s' item in config,"
                                  " unknown type %s") %
-                                 (item, type_utils.obj_name(item)))
+                                (item, type_utils.obj_name(item)))
         return module_list
 
     def _fixup_modules(self, raw_mods):
@@ -762,8 +762,8 @@ class Modules(object):
 
         if skipped:
             LOG.info("Skipping modules %s because they are not verified "
-                      "on distro '%s'.  To run anyway, add them to "
-                      "'unverified_modules' in config.", skipped, d_name)
+                     "on distro '%s'.  To run anyway, add them to "
+                     "'unverified_modules' in config.", skipped, d_name)
         if forced:
             LOG.info("running unverified_modules: %s", forced)
 
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index f2e1390e..936f7da5 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -252,9 +252,9 @@ def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
             # attrs
             return UrlResponse(r)
         except exceptions.RequestException as e:
-            if (isinstance(e, (exceptions.HTTPError))
-                    and hasattr(e, 'response')  # This appeared in v 0.10.8
-                    and hasattr(e.response, 'status_code')):
+            if (isinstance(e, (exceptions.HTTPError)) and
+               hasattr(e, 'response') and  # This appeared in v 0.10.8
+               hasattr(e.response, 'status_code')):
                 excps.append(UrlError(e, code=e.response.status_code,
                                       headers=e.response.headers,
                                       url=url))
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 45d49e66..de37b0f5 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -612,7 +612,7 @@ def redirect_output(outfmt, errfmt, o_out=None, o_err=None):
 
 
 def make_url(scheme, host, port=None,
-                path='', params='', query='', fragment=''):
+             path='', params='', query='', fragment=''):
 
     pieces = []
     pieces.append(scheme or '')
@@ -804,8 +804,8 @@ def load_yaml(blob, default=None, allowed=(dict,)):
     blob = decode_binary(blob)
     try:
         LOG.debug("Attempting to load yaml from string "
-                 "of length %s with allowed root types %s",
-                 len(blob), allowed)
+                  "of length %s with allowed root types %s",
+                  len(blob), allowed)
         converted = safeyaml.load(blob)
         if not isinstance(converted, allowed):
             # Yes this will just be caught, but thats ok for now...
@@ -878,7 +878,7 @@ def read_conf_with_confd(cfgfile):
             if not isinstance(confd, six.string_types):
                 raise TypeError(("Config file %s contains 'conf_d' "
                                  "with non-string type %s") %
-                                 (cfgfile, type_utils.obj_name(confd)))
+                                (cfgfile, type_utils.obj_name(confd)))
             else:
                 confd = str(confd).strip()
     elif os.path.isdir("%s.d" % cfgfile):
@@ -1041,7 +1041,8 @@ def is_resolvable(name):
         for iname in badnames:
             try:
                 result = socket.getaddrinfo(iname, None, 0, 0,
-                    socket.SOCK_STREAM, socket.AI_CANONNAME)
+                                            socket.SOCK_STREAM,
+                                            socket.AI_CANONNAME)
                 badresults[iname] = []
                 for (_fam, _stype, _proto, cname, sockaddr) in result:
                     badresults[iname].append("%s: %s" % (cname, sockaddr[0]))
@@ -1109,7 +1110,7 @@ def close_stdin():
 
 
 def find_devs_with(criteria=None, oformat='device',
-                    tag=None, no_cache=False, path=None):
+                   tag=None, no_cache=False, path=None):
     """
     find devices matching given criteria (via blkid)
     criteria can be *one* of:
@@ -1628,7 +1629,7 @@ def write_file(filename, content, mode=0o644, omode="wb"):
         content = decode_binary(content)
         write_type = 'characters'
     LOG.debug("Writing to %s - %s: [%s] %s %s",
-               filename, omode, mode, len(content), write_type)
+              filename, omode, mode, len(content), write_type)
     with SeLinuxGuard(path=filename):
         with open(filename, omode) as fh:
             fh.write(content)
diff --git a/tests/unittests/test_data.py b/tests/unittests/test_data.py
index c603bfdb..9c1ec1d4 100644
--- a/tests/unittests/test_data.py
+++ b/tests/unittests/test_data.py
@@ -27,11 +27,12 @@ from cloudinit import stages
 from cloudinit import user_data as ud
 from cloudinit import util
 
-INSTANCE_ID = "i-testing"
-
 from . import helpers
 
 
+INSTANCE_ID = "i-testing"
+
+
 class FakeDataSource(sources.DataSource):
 
     def __init__(self, userdata=None, vendordata=None):
diff --git a/tests/unittests/test_datasource/test_altcloud.py b/tests/unittests/test_datasource/test_altcloud.py
index e9cd2fa5..85759c68 100644
--- a/tests/unittests/test_datasource/test_altcloud.py
+++ b/tests/unittests/test_datasource/test_altcloud.py
@@ -134,8 +134,7 @@ class TestGetCloudType(TestCase):
         '''
         util.read_dmi_data = _dmi_data('RHEV')
         dsrc = DataSourceAltCloud({}, None, self.paths)
-        self.assertEquals('RHEV', \
-            dsrc.get_cloud_type())
+        self.assertEquals('RHEV', dsrc.get_cloud_type())
 
     def test_vsphere(self):
         '''
@@ -144,8 +143,7 @@ class TestGetCloudType(TestCase):
         '''
         util.read_dmi_data = _dmi_data('VMware Virtual Platform')
         dsrc = DataSourceAltCloud({}, None, self.paths)
-        self.assertEquals('VSPHERE', \
-            dsrc.get_cloud_type())
+        self.assertEquals('VSPHERE', dsrc.get_cloud_type())
 
     def test_unknown(self):
         '''
@@ -154,8 +152,7 @@ class TestGetCloudType(TestCase):
         '''
         util.read_dmi_data = _dmi_data('Unrecognized Platform')
         dsrc = DataSourceAltCloud({}, None, self.paths)
-        self.assertEquals('UNKNOWN', \
-            dsrc.get_cloud_type())
+        self.assertEquals('UNKNOWN', dsrc.get_cloud_type())
 
 
 class TestGetDataCloudInfoFile(TestCase):
@@ -412,27 +409,27 @@ class TestReadUserDataCallback(TestCase):
         '''Test read_user_data_callback() with both files.'''
 
         self.assertEquals('test user data',
-            read_user_data_callback(self.mount_dir))
+                          read_user_data_callback(self.mount_dir))
 
     def test_callback_dc(self):
         '''Test read_user_data_callback() with only DC file.'''
 
         _remove_user_data_files(self.mount_dir,
-            dc_file=False,
-            non_dc_file=True)
+                                dc_file=False,
+                                non_dc_file=True)
 
         self.assertEquals('test user data',
-            read_user_data_callback(self.mount_dir))
+                          read_user_data_callback(self.mount_dir))
 
     def test_callback_non_dc(self):
         '''Test read_user_data_callback() with only non-DC file.'''
 
         _remove_user_data_files(self.mount_dir,
-            dc_file=True,
-            non_dc_file=False)
+                                dc_file=True,
+                                non_dc_file=False)
 
         self.assertEquals('test user data',
-            read_user_data_callback(self.mount_dir))
+                          read_user_data_callback(self.mount_dir))
 
     def test_callback_none(self):
         '''Test read_user_data_callback() no files are found.'''
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 3933794f..4c9c7d8b 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -207,7 +207,7 @@ class TestAzureDataSource(TestCase):
         yaml_cfg = "{agent_command: my_command}\n"
         cfg = yaml.safe_load(yaml_cfg)
         odata = {'HostName': "myhost", 'UserName': "myuser",
-                'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
+                 'dscfg': {'text': yaml_cfg, 'encoding': 'plain'}}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
 
         dsrc = self._get_ds(data)
@@ -219,8 +219,8 @@ class TestAzureDataSource(TestCase):
         # set dscfg in via base64 encoded yaml
         cfg = {'agent_command': "my_command"}
         odata = {'HostName': "myhost", 'UserName': "myuser",
-                'dscfg': {'text': b64e(yaml.dump(cfg)),
-                          'encoding': 'base64'}}
+                 'dscfg': {'text': b64e(yaml.dump(cfg)),
+                           'encoding': 'base64'}}
         data = {'ovfcontent': construct_valid_ovf_env(data=odata)}
 
         dsrc = self._get_ds(data)
@@ -267,7 +267,8 @@ class TestAzureDataSource(TestCase):
         # should equal that after the '$'
         pos = defuser['passwd'].rfind("$") + 1
         self.assertEqual(defuser['passwd'],
-            crypt.crypt(odata['UserPassword'], defuser['passwd'][0:pos]))
+                         crypt.crypt(odata['UserPassword'],
+                         defuser['passwd'][0:pos]))
 
     def test_userdata_plain(self):
         mydata = "FOOBAR"
@@ -364,8 +365,8 @@ class TestAzureDataSource(TestCase):
         # Make sure that user can affect disk aliases
         dscfg = {'disk_aliases': {'ephemeral0': '/dev/sdc'}}
         odata = {'HostName': "myhost", 'UserName': "myuser",
-                'dscfg': {'text': b64e(yaml.dump(dscfg)),
-                          'encoding': 'base64'}}
+                 'dscfg': {'text': b64e(yaml.dump(dscfg)),
+                           'encoding': 'base64'}}
         usercfg = {'disk_setup': {'/dev/sdc': {'something': '...'},
                                   'ephemeral0': False}}
         userdata = '#cloud-config' + yaml.dump(usercfg) + "\n"
@@ -634,7 +635,7 @@ class TestReadAzureOvf(TestCase):
     def test_invalid_xml_raises_non_azure_ds(self):
         invalid_xml = "<foo>" + construct_valid_ovf_env(data={})
         self.assertRaises(DataSourceAzure.BrokenAzureDataSource,
-            DataSourceAzure.read_azure_ovf, invalid_xml)
+                          DataSourceAzure.read_azure_ovf, invalid_xml)
 
     def test_load_with_pubkeys(self):
         mypklist = [{'fingerprint': 'fp1', 'path': 'path1', 'value': ''}]
diff --git a/tests/unittests/test_datasource/test_configdrive.py b/tests/unittests/test_datasource/test_configdrive.py
index 83aca505..3954ceb3 100644
--- a/tests/unittests/test_datasource/test_configdrive.py
+++ b/tests/unittests/test_datasource/test_configdrive.py
@@ -293,9 +293,8 @@ class TestConfigDriveDataSource(TestCase):
             util.is_partition = my_is_partition
 
             devs_with_answers = {"TYPE=vfat": [],
-                "TYPE=iso9660": ["/dev/vdb"],
-                "LABEL=config-2": ["/dev/vdb"],
-            }
+                                 "TYPE=iso9660": ["/dev/vdb"],
+                                 "LABEL=config-2": ["/dev/vdb"]}
             self.assertEqual(["/dev/vdb"], ds.find_candidate_devs())
 
             # add a vfat item
@@ -306,9 +305,10 @@ class TestConfigDriveDataSource(TestCase):
 
             # verify that partitions are considered, that have correct label.
             devs_with_answers = {"TYPE=vfat": ["/dev/sda1"],
-                "TYPE=iso9660": [], "LABEL=config-2": ["/dev/vdb3"]}
+                                 "TYPE=iso9660": [],
+                                 "LABEL=config-2": ["/dev/vdb3"]}
             self.assertEqual(["/dev/vdb3"],
-                              ds.find_candidate_devs())
+                             ds.find_candidate_devs())
 
         finally:
             util.find_devs_with = orig_find_devs_with
@@ -319,7 +319,7 @@ class TestConfigDriveDataSource(TestCase):
         populate_dir(self.tmp, CFG_DRIVE_FILES_V2)
         myds = cfg_ds_from_dir(self.tmp)
         self.assertEqual(myds.get_public_ssh_keys(),
-           [OSTACK_META['public_keys']['mykey']])
+                         [OSTACK_META['public_keys']['mykey']])
 
 
 def cfg_ds_from_dir(seed_d):
diff --git a/tests/unittests/test_datasource/test_maas.py b/tests/unittests/test_datasource/test_maas.py
index eb97b692..77d15cac 100644
--- a/tests/unittests/test_datasource/test_maas.py
+++ b/tests/unittests/test_datasource/test_maas.py
@@ -25,9 +25,9 @@ class TestMAASDataSource(TestCase):
         """Verify a valid seeddir is read as such."""
 
         data = {'instance-id': 'i-valid01',
-            'local-hostname': 'valid01-hostname',
-            'user-data': b'valid01-userdata',
-            'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
+                'local-hostname': 'valid01-hostname',
+                'user-data': b'valid01-userdata',
+                'public-keys': 'ssh-rsa AAAAB3Nz...aC1yc2E= keyname'}
 
         my_d = os.path.join(self.tmp, "valid")
         populate_dir(my_d, data)
@@ -45,8 +45,8 @@ class TestMAASDataSource(TestCase):
         """Verify extra files do not affect seed_dir validity."""
 
         data = {'instance-id': 'i-valid-extra',
-            'local-hostname': 'valid-extra-hostname',
-            'user-data': b'valid-extra-userdata', 'foo': 'bar'}
+                'local-hostname': 'valid-extra-hostname',
+                'user-data': b'valid-extra-userdata', 'foo': 'bar'}
 
         my_d = os.path.join(self.tmp, "valid_extra")
         populate_dir(my_d, data)
@@ -64,7 +64,7 @@ class TestMAASDataSource(TestCase):
         """Verify that invalid seed_dir raises MAASSeedDirMalformed."""
 
         valid = {'instance-id': 'i-instanceid',
-            'local-hostname': 'test-hostname', 'user-data': ''}
+                 'local-hostname': 'test-hostname', 'user-data': ''}
 
         my_based = os.path.join(self.tmp, "valid_extra")
 
@@ -94,8 +94,8 @@ class TestMAASDataSource(TestCase):
     def test_seed_dir_missing(self):
         """Verify that missing seed_dir raises MAASSeedDirNone."""
         self.assertRaises(DataSourceMAAS.MAASSeedDirNone,
-            DataSourceMAAS.read_maas_seed_dir,
-            os.path.join(self.tmp, "nonexistantdirectory"))
+                          DataSourceMAAS.read_maas_seed_dir,
+                          os.path.join(self.tmp, "nonexistantdirectory"))
 
     def test_seed_url_valid(self):
         """Verify that valid seed_url is read as such."""
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 1235436d..5e617b83 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -463,8 +463,8 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
                 payloadstr = ' {0}'.format(self.response_parts['payload'])
             return ('V2 {length} {crc} {request_id} '
                     '{command}{payloadstr}\n'.format(
-                    payloadstr=payloadstr,
-                    **self.response_parts).encode('ascii'))
+                     payloadstr=payloadstr,
+                     **self.response_parts).encode('ascii'))
 
         self.metasource_data = None
 
@@ -501,7 +501,7 @@ class TestJoyentMetadataClient(helpers.FilesystemMockingTestCase):
         written_line = self.serial.write.call_args[0][0]
         print(type(written_line))
         self.assertEndsWith(written_line.decode('ascii'),
-            b'\n'.decode('ascii'))
+                            b'\n'.decode('ascii'))
         self.assertEqual(1, written_line.count(b'\n'))
 
     def _get_written_line(self, key='some_key'):
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py
index 5687b10d..f9660ff6 100644
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ b/tests/unittests/test_handler/test_handler_power_state.py
@@ -74,7 +74,7 @@ class TestLoadPowerState(t_help.TestCase):
 class TestCheckCondition(t_help.TestCase):
     def cmd_with_exit(self, rc):
         return([sys.executable, '-c', 'import sys; sys.exit(%s)' % rc])
-        
+
     def test_true_is_true(self):
         self.assertEqual(psc.check_condition(True), True)
 
@@ -94,7 +94,6 @@ class TestCheckCondition(t_help.TestCase):
         self.assertEqual(mocklog.warn.call_count, 1)
 
 
-
 def check_lps_ret(psc_return, mode=None):
     if len(psc_return) != 3:
         raise TypeError("length returned = %d" % len(psc_return))
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
index 0bcdcb31..34d11f21 100644
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ b/tests/unittests/test_handler/test_handler_seed_random.py
@@ -190,7 +190,8 @@ class TestRandomSeed(t_help.TestCase):
         c = self._get_cloud('ubuntu', {})
         self.whichdata = {}
         self.assertRaises(ValueError, cc_seed_random.handle,
-            'test', {'random_seed': {'command_required': True}}, c, LOG, [])
+                          'test', {'random_seed': {'command_required': True}},
+                          c, LOG, [])
 
     def test_seed_command_and_required(self):
         c = self._get_cloud('ubuntu', {})
diff --git a/tests/unittests/test_handler/test_handler_snappy.py b/tests/unittests/test_handler/test_handler_snappy.py
index eceb14d9..8aeff53c 100644
--- a/tests/unittests/test_handler/test_handler_snappy.py
+++ b/tests/unittests/test_handler/test_handler_snappy.py
@@ -125,8 +125,7 @@ class TestInstallPackages(t_help.TestCase):
              "pkg1.smoser.config": "pkg1.smoser.config-data",
              "pkg1.config": "pkg1.config-data",
              "pkg2.smoser_0.0_amd64.snap": "pkg2-snapdata",
-             "pkg2.smoser_0.0_amd64.config": "pkg2.config",
-            })
+             "pkg2.smoser_0.0_amd64.config": "pkg2.config"})
 
         ret = get_package_ops(
             packages=[], configs={}, installed=[], fspath=self.tmp)
diff --git a/tests/unittests/test_sshutil.py b/tests/unittests/test_sshutil.py
index 3b317121..9aeb1cde 100644
--- a/tests/unittests/test_sshutil.py
+++ b/tests/unittests/test_sshutil.py
@@ -32,7 +32,8 @@ VALID_CONTENT = {
     ),
 }
 
-TEST_OPTIONS = ("no-port-forwarding,no-agent-forwarding,no-X11-forwarding,"
+TEST_OPTIONS = (
+    "no-port-forwarding,no-agent-forwarding,no-X11-forwarding,"
     'command="echo \'Please login as the user \"ubuntu\" rather than the'
     'user \"root\".\';echo;sleep 10"')
 
diff --git a/tests/unittests/test_templating.py b/tests/unittests/test_templating.py
index 0c19a2c2..b9863650 100644
--- a/tests/unittests/test_templating.py
+++ b/tests/unittests/test_templating.py
@@ -114,5 +114,6 @@ $a,$b'''
                                                                codename)
 
         out_data = templater.basic_render(in_data,
-            {'mirror': mirror, 'codename': codename})
+                                          {'mirror': mirror,
+                                           'codename': codename})
         self.assertEqual(ex_data, out_data)
diff --git a/tools/hacking.py b/tools/hacking.py
index 3175df38..1a0631c2 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -47,10 +47,10 @@ def import_normalize(line):
     # handle "from x import y as z" to "import x.y as z"
     split_line = line.split()
     if (line.startswith("from ") and "," not in line and
-           split_line[2] == "import" and split_line[3] != "*" and
-           split_line[1] != "__future__" and
-           (len(split_line) == 4 or
-           (len(split_line) == 6 and split_line[4] == "as"))):
+       split_line[2] == "import" and split_line[3] != "*" and
+       split_line[1] != "__future__" and
+       (len(split_line) == 4 or
+       (len(split_line) == 6 and split_line[4] == "as"))):
         return "import %s.%s" % (split_line[1], split_line[3])
     else:
         return line
@@ -74,7 +74,7 @@ def cloud_import_alphabetical(physical_line, line_number, lines):
             split_line[0] == "import" and split_previous[0] == "import"):
         if split_line[1] < split_previous[1]:
             return (0, "N306: imports not in alphabetical order (%s, %s)"
-                % (split_previous[1], split_line[1]))
+                    % (split_previous[1], split_line[1]))
 
 
 def cloud_docstring_start_space(physical_line):
@@ -87,8 +87,8 @@ def cloud_docstring_start_space(physical_line):
     pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE])  # start
     if (pos != -1 and len(physical_line) > pos + 1):
         if (physical_line[pos + 3] == ' '):
-            return (pos, "N401: one line docstring should not start with"
-                " a space")
+            return (pos,
+                    "N401: one line docstring should not start with a space")
 
 
 def cloud_todo_format(physical_line):
@@ -167,4 +167,4 @@ if __name__ == "__main__":
     finally:
         if len(_missingImport) > 0:
             print >> sys.stderr, ("%i imports missing in this test environment"
-                    % len(_missingImport))
+                                  % len(_missingImport))
diff --git a/tools/mock-meta.py b/tools/mock-meta.py
index dfbc2a71..1c746f17 100755
--- a/tools/mock-meta.py
+++ b/tools/mock-meta.py
@@ -126,11 +126,11 @@ class WebException(Exception):
 
 def yamlify(data):
     formatted = yaml.dump(data,
-        line_break="\n",
-        indent=4,
-        explicit_start=True,
-        explicit_end=True,
-        default_flow_style=False)
+                          line_break="\n",
+                          indent=4,
+                          explicit_start=True,
+                          explicit_end=True,
+                          default_flow_style=False)
     return formatted
 
 
@@ -282,7 +282,7 @@ class MetaDataHandler(object):
         else:
             log.warn(("Did not implement action %s, "
                       "returning empty response: %r"),
-                      action, NOT_IMPL_RESPONSE)
+                     action, NOT_IMPL_RESPONSE)
             return NOT_IMPL_RESPONSE
 
 
@@ -404,14 +404,17 @@ def setup_logging(log_level, fmt='%(levelname)s: @%(name)s : %(message)s'):
 def extract_opts():
     parser = OptionParser()
     parser.add_option("-p", "--port", dest="port", action="store", type=int,
-        default=80, metavar="PORT",
-        help="port from which to serve traffic (default: %default)")
+                      default=80, metavar="PORT",
+                      help=("port from which to serve traffic"
+                            " (default: %default)"))
     parser.add_option("-a", "--addr", dest="address", action="store", type=str,
-        default='0.0.0.0', metavar="ADDRESS",
-        help="address from which to serve traffic (default: %default)")
+                      default='0.0.0.0', metavar="ADDRESS",
+                      help=("address from which to serve traffic"
+                            " (default: %default)"))
     parser.add_option("-f", '--user-data-file', dest='user_data_file',
-        action='store', metavar='FILE',
-        help="user data filename to serve back to incoming requests")
+                      action='store', metavar='FILE',
+                      help=("user data filename to serve back to"
+                            "incoming requests"))
     (options, args) = parser.parse_args()
     out = dict()
     out['extra'] = args
diff --git a/tools/run-pep8 b/tools/run-pep8
index ccd6be5a..086400fc 100755
--- a/tools/run-pep8
+++ b/tools/run-pep8
@@ -1,39 +1,22 @@
 #!/bin/bash
 
-if [ $# -eq 0 ]; then
-   files=( bin/cloud-init $(find * -name "*.py" -type f) )
+pycheck_dirs=( "cloudinit/" "bin/" "tests/" "tools/" )
+# FIXME: cloud-init modifies sys module path, pep8 does not like
+# bin_files=( "bin/cloud-init" )
+CR="
+"
+[ "$1" = "-v" ] && { verbose="$1"; shift; } || verbose=""
+
+set -f
+if [ $# -eq 0 ]; then unset IFS
+   IFS="$CR"
+   files=( "${bin_files[@]}" "${pycheck_dirs[@]}" )
+   unset IFS
 else
-   files=( "$@" );
+   files=( "$@" )
 fi
 
-if [ -f 'hacking.py' ]
-then
-    base=`pwd`
-else
-    base=`pwd`/tools/
-fi
-
-IGNORE=""
-
-# King Arthur: Be quiet! ... Be Quiet! I Order You to Be Quiet.
-IGNORE="$IGNORE,E121" # Continuation line indentation is not a multiple of four
-IGNORE="$IGNORE,E123" # Closing bracket does not match indentation of opening bracket's line
-IGNORE="$IGNORE,E124" # Closing bracket missing visual indentation
-IGNORE="$IGNORE,E125" # Continuation line does not distinguish itself from next logical line
-IGNORE="$IGNORE,E126" # Continuation line over-indented for hanging indent
-IGNORE="$IGNORE,E127" # Continuation line over-indented for visual indent
-IGNORE="$IGNORE,E128" # Continuation line under-indented for visual indent
-IGNORE="$IGNORE,E502" # The backslash is redundant between brackets
-IGNORE="${IGNORE#,}"  # remove the leading ',' added above
-
-cmd=(
-    ${base}/hacking.py
-
-    --ignore="$IGNORE"
-
-    "${files[@]}"
-)
-
-echo -e "\nRunning 'cloudinit' pep8:"
-echo "${cmd[@]}"
-"${cmd[@]}"
+myname=${0##*/}
+cmd=( "${myname#run-}" $verbose "${files[@]}" )
+echo "Running: " "${cmd[@]}" 1>&2
+exec "${cmd[@]}"
-- 
cgit v1.2.3


From 3d9153d16b194e7a3139c290e723ef17518e617d Mon Sep 17 00:00:00 2001
From: Ryan Harper <ryan.harper@canonical.com>
Date: Thu, 3 Mar 2016 16:32:32 -0600
Subject: Fix pyflake/pyflake3 errors

Now we can run make check to assess pep8, pyflakes for python2 or 3
And execute unittests via nosetests (2 and 3).
---
 Makefile                                               |  1 -
 cloudinit/util.py                                      |  2 +-
 tests/unittests/test_datasource/test_azure_helper.py   |  2 --
 tests/unittests/test_datasource/test_smartos.py        |  1 -
 .../unittests/test_handler/test_handler_power_state.py |  2 +-
 tools/run-pyflakes                                     | 18 ++++++++++++++++++
 tools/run-pyflakes3                                    |  2 ++
 7 files changed, 22 insertions(+), 6 deletions(-)
 create mode 100755 tools/run-pyflakes
 create mode 100755 tools/run-pyflakes3

(limited to 'cloudinit')

diff --git a/Makefile b/Makefile
index fb65b70b..fc91f829 100644
--- a/Makefile
+++ b/Makefile
@@ -28,7 +28,6 @@ pyflakes:
 pyflakes3:
 	@$(CWD)/tools/run-pyflakes3
 	
-
 unittest:
 	nosetests $(noseopts) tests/unittests
 	nosetests3 $(noseopts) tests/unittests
diff --git a/cloudinit/util.py b/cloudinit/util.py
index de37b0f5..e7407ea4 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2148,7 +2148,7 @@ def _read_dmi_syspath(key):
         LOG.debug("dmi data %s returned %s", dmi_key_path, key_data)
         return key_data.strip()
 
-    except Exception as e:
+    except Exception:
         logexc(LOG, "failed read of %s", dmi_key_path)
         return None
 
diff --git a/tests/unittests/test_datasource/test_azure_helper.py b/tests/unittests/test_datasource/test_azure_helper.py
index 8dbdfb0b..1134199b 100644
--- a/tests/unittests/test_datasource/test_azure_helper.py
+++ b/tests/unittests/test_datasource/test_azure_helper.py
@@ -1,6 +1,4 @@
 import os
-import struct
-import unittest
 
 from cloudinit.sources.helpers import azure as azure_helper
 from ..helpers import TestCase
diff --git a/tests/unittests/test_datasource/test_smartos.py b/tests/unittests/test_datasource/test_smartos.py
index 5e617b83..616e9f0e 100644
--- a/tests/unittests/test_datasource/test_smartos.py
+++ b/tests/unittests/test_datasource/test_smartos.py
@@ -31,7 +31,6 @@ import shutil
 import stat
 import tempfile
 import uuid
-import unittest
 from binascii import crc32
 
 import serial
diff --git a/tests/unittests/test_handler/test_handler_power_state.py b/tests/unittests/test_handler/test_handler_power_state.py
index f9660ff6..04ce5687 100644
--- a/tests/unittests/test_handler/test_handler_power_state.py
+++ b/tests/unittests/test_handler/test_handler_power_state.py
@@ -106,7 +106,7 @@ def check_lps_ret(psc_return, mode=None):
     if 'shutdown' not in psc_return[0][0]:
         errs.append("string 'shutdown' not in cmd")
 
-    if 'condition' is None:
+    if condition is None:
         errs.append("condition was not returned")
 
     if mode is not None:
diff --git a/tools/run-pyflakes b/tools/run-pyflakes
new file mode 100755
index 00000000..4bea17f4
--- /dev/null
+++ b/tools/run-pyflakes
@@ -0,0 +1,18 @@
+#!/bin/bash
+
+PYTHON_VERSION=${PYTHON_VERSION:-2}
+CR="
+"
+pycheck_dirs=( "cloudinit/" "bin/" "tests/" "tools/" )
+
+set -f
+if [ $# -eq 0 ]; then
+   files=( "${pycheck_dirs[@]}" )
+else
+   files=( "$@" )
+fi
+
+cmd=( "python${PYTHON_VERSION}" -m "pyflakes" "${files[@]}" )
+
+echo "Running: " "${cmd[@]}" 1>&2
+exec "${cmd[@]}"
diff --git a/tools/run-pyflakes3 b/tools/run-pyflakes3
new file mode 100755
index 00000000..e9f0863d
--- /dev/null
+++ b/tools/run-pyflakes3
@@ -0,0 +1,2 @@
+#!/bin/sh
+PYTHON_VERSION=3 exec "${0%/*}/run-pyflakes" "$@"
-- 
cgit v1.2.3


From bbf105baafbe788f7babbda188b513180424e256 Mon Sep 17 00:00:00 2001
From: Sankar Tanguturi <stanguturi@stanguturi-rhel>
Date: Thu, 3 Mar 2016 16:01:39 -0800
Subject:  Resolved all the pep8 errors.  Executed ./tools/run-pep8
 cloudinit/sources/DataSourceOVF.py and no errors  were reported.

---
 cloudinit/sources/DataSourceOVF.py | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index d92c128c..d07f6219 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -66,13 +66,14 @@ class DataSourceOVF(sources.DataSource):
 
         system_type = util.read_dmi_data("system-product-name")
         if system_type is None:
-           LOG.debug("No system-product-name found")
+            LOG.debug("No system-product-name found")
         elif 'vmware' in system_type.lower():
             LOG.debug("VMware Virtualization Platform found")
             if not util.get_cfg_option_bool(self.sys_cfg,
                                         "disable_vmware_customization",
                                         True):
-                deployPkgPluginPath = search_file("/usr/lib/vmware-tools", "libdeployPkgPlugin.so")
+                deployPkgPluginPath = search_file("/usr/lib/vmware-tools",
+                                                  "libdeployPkgPlugin.so")
                 if deployPkgPluginPath:
                     vmwareImcConfigFilePath = util.log_time(logfunc=LOG.debug,
                                       msg="waiting for configuration file",
@@ -80,7 +81,8 @@ class DataSourceOVF(sources.DataSource):
                                       args=("/tmp", "cust.cfg"))
 
                 if vmwareImcConfigFilePath:
-                    LOG.debug("Found VMware DeployPkg Config File Path at %s" % vmwareImcConfigFilePath)
+                    LOG.debug("Found VMware DeployPkg Config File at %s" %
+                              vmwareImcConfigFilePath)
                 else:
                     LOG.debug("Did not find VMware DeployPkg Config File Path")
             else:
@@ -151,7 +153,7 @@ class DataSourceOVF(sources.DataSource):
 
     def get_public_ssh_keys(self):
         if 'public-keys' not in self.metadata:
-           return []
+            return []
         pks = self.metadata['public-keys']
         if isinstance(pks, (list)):
             return pks
@@ -174,7 +176,7 @@ class DataSourceOVFNet(DataSourceOVF):
 
 def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
     waited = 0
-    
+
     while waited < maxwait:
         fileFullPath = search_file(dirpath, filename)
         if fileFullPath:
@@ -183,6 +185,7 @@ def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
         waited += naplen
     return None
 
+
 # This will return a dict with some content
 #  meta-data, user-data, some config
 def read_vmware_imc(config):
@@ -190,13 +193,14 @@ def read_vmware_imc(config):
     cfg = {}
     ud = ""
     if config.host_name:
-       if config.domain_name:
-          md['local-hostname'] = config.host_name + "." + config.domain_name
-       else:
-          md['local-hostname'] = config.host_name
+        if config.domain_name:
+            md['local-hostname'] = config.host_name + "." + config.domain_name
+        else:
+            md['local-hostname'] = config.host_name
 
     return (md, ud, cfg)
 
+
 # This will return a dict with some content
 #  meta-data, user-data, some config
 def read_ovf_environment(contents):
@@ -351,7 +355,7 @@ def get_properties(contents):
 
 def search_file(dirpath, filename):
     if not dirpath or not filename:
-       return None
+        return None
 
     for root, dirs, files in os.walk(dirpath):
         if filename in files:
@@ -359,6 +363,7 @@ def search_file(dirpath, filename):
 
     return None
 
+
 class XmlError(Exception):
     pass
 
-- 
cgit v1.2.3


From 70acc910c3368980d7cb8971391a2c9dfaf3fda8 Mon Sep 17 00:00:00 2001
From: Ryan Harper <ryan.harper@canonical.com>
Date: Fri, 4 Mar 2016 09:51:05 -0600
Subject: pep8: update formatting to pass pep8 1.4.6 (trusty) and 1.6.2
 (xenial)

make check fails in a trusty sbuild due to different rules on older pep8.
Fix formatting to pass in older and newer pep8.
---
 cloudinit/config/cc_rh_subscription.py        | 4 +---
 tests/unittests/test_datasource/test_azure.py | 2 +-
 tools/hacking.py                              | 4 ++--
 3 files changed, 4 insertions(+), 6 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/config/cc_rh_subscription.py b/cloudinit/config/cc_rh_subscription.py
index 6f474aed..6087c45c 100644
--- a/cloudinit/config/cc_rh_subscription.py
+++ b/cloudinit/config/cc_rh_subscription.py
@@ -126,10 +126,8 @@ class SubscriptionManager(object):
                        "(True/False "
             return False, not_bool
 
-        if (self.servicelevel is not None) and \
-           ((not self.auto_attach) or
+        if (self.servicelevel is not None) and ((not self.auto_attach) or
            (util.is_false(str(self.auto_attach)))):
-
             no_auto = ("The service-level key must be used in conjunction "
                        "with the auto-attach key.  Please re-run with "
                        "auto-attach: True")
diff --git a/tests/unittests/test_datasource/test_azure.py b/tests/unittests/test_datasource/test_azure.py
index 4c9c7d8b..444e2799 100644
--- a/tests/unittests/test_datasource/test_azure.py
+++ b/tests/unittests/test_datasource/test_azure.py
@@ -268,7 +268,7 @@ class TestAzureDataSource(TestCase):
         pos = defuser['passwd'].rfind("$") + 1
         self.assertEqual(defuser['passwd'],
                          crypt.crypt(odata['UserPassword'],
-                         defuser['passwd'][0:pos]))
+                                     defuser['passwd'][0:pos]))
 
     def test_userdata_plain(self):
         mydata = "FOOBAR"
diff --git a/tools/hacking.py b/tools/hacking.py
index 1a0631c2..716c1154 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -49,8 +49,8 @@ def import_normalize(line):
     if (line.startswith("from ") and "," not in line and
        split_line[2] == "import" and split_line[3] != "*" and
        split_line[1] != "__future__" and
-       (len(split_line) == 4 or
-       (len(split_line) == 6 and split_line[4] == "as"))):
+       (len(split_line) == 4 or (len(split_line) == 6 and
+                                 split_line[4] == "as"))):
         return "import %s.%s" % (split_line[1], split_line[3])
     else:
         return line
-- 
cgit v1.2.3


From 0a4c7983613a134fa62b8ee0c11b558f9e405346 Mon Sep 17 00:00:00 2001
From: Alex Sirbu <alexandru.sirbu@bigstep.com>
Date: Mon, 7 Mar 2016 09:13:17 +0000
Subject: Enable Bigstep data source in default configuration

---
 cloudinit/settings.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'cloudinit')

diff --git a/cloudinit/settings.py b/cloudinit/settings.py
index b61e5613..8c258ea1 100644
--- a/cloudinit/settings.py
+++ b/cloudinit/settings.py
@@ -42,6 +42,7 @@ CFG_BUILTIN = {
         'CloudSigma',
         'CloudStack',
         'SmartOS',
+        'Bigstep',
         # At the end to act as a 'catch' when none of the above work...
         'None',
     ],
-- 
cgit v1.2.3


From 9ec6c876b72ccfa2ae590505fe6dbf7c0c561520 Mon Sep 17 00:00:00 2001
From: Alex Sirbu <alexandru.sirbu@bigstep.com>
Date: Mon, 7 Mar 2016 09:33:40 +0000
Subject: Returning False if the file does not exist, instead of raising an error

---
 cloudinit/sources/DataSourceBigstep.py | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
index c22ffdb6..2d66c609 100644
--- a/cloudinit/sources/DataSourceBigstep.py
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -5,6 +5,7 @@
 #
 
 import json
+import errno
 
 from cloudinit import log as logging
 from cloudinit import sources
@@ -22,7 +23,13 @@ class DataSourceBigstep(sources.DataSource):
         self.userdata_raw = ""
 
     def get_data(self, apply_filter=False):
-        url = get_url_from_file()
+        try:
+            url = get_url_from_file()
+        except IOError as e:
+            if e.errno == errno.ENOENT:
+                return False
+            else:
+                raise
         response = url_helper.readurl(url)
         decoded = json.loads(response.contents)
         self.metadata = decoded["metadata"]
-- 
cgit v1.2.3


From d23868d6d3e35a91c348b94ce8416f56514aaf15 Mon Sep 17 00:00:00 2001
From: Alex Sirbu <alexandru.sirbu@bigstep.com>
Date: Mon, 7 Mar 2016 12:30:08 +0000
Subject: Implemented review feedback: moved the try block and added more
 information about the caught exception.

---
 cloudinit/sources/DataSourceBigstep.py | 20 ++++++++++++--------
 1 file changed, 12 insertions(+), 8 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
index 2d66c609..b5ee4129 100644
--- a/cloudinit/sources/DataSourceBigstep.py
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -23,13 +23,9 @@ class DataSourceBigstep(sources.DataSource):
         self.userdata_raw = ""
 
     def get_data(self, apply_filter=False):
-        try:
-            url = get_url_from_file()
-        except IOError as e:
-            if e.errno == errno.ENOENT:
-                return False
-            else:
-                raise
+        url = get_url_from_file()
+        if url is None:
+            return False
         response = url_helper.readurl(url)
         decoded = json.loads(response.contents)
         self.metadata = decoded["metadata"]
@@ -39,7 +35,15 @@ class DataSourceBigstep(sources.DataSource):
 
 
 def get_url_from_file():
-    content = util.load_file("/var/lib/cloud/data/seed/bigstep/url")
+    try:
+        content = util.load_file("/var/lib/cloud/data/seed/bigstep/url")
+    except IOError as e:
+        # If the file doesn't exist, then the server probably isn't a Bigstep
+        # instance; otherwise, another problem exists which needs investigation
+        if e.errno == errno.ENOENT:
+            return None
+        else:
+            raise
     return content
 
 # Used to match classes to dependencies
-- 
cgit v1.2.3


From 6e31038b9cccbcb4a33693060b96fc4f71d86789 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 7 Mar 2016 21:31:25 -0500
Subject: No longer run pollinate by default in seed_random

The user can still choose to run pollinate here to seed their
random data.  And in an environment with network datasource, that
would be expected to work.  However, we do not want to run it any
more from cloud-init because
a.) pollinate's own init system jobs should get it ran before ssh,
    which is the primary purpose of wanting cloud-init to run it.
b.) with a local datasource, there is no network guarantee when
    init_modules run, so pollinate -q would often cause issues then.
c.) cloud-init would run pollinate and log the failure causing
    many cloud-init specific failures that it could do nothing about.

LP: #1554152
---
 ChangeLog                                                |  1 +
 cloudinit/config/cc_seed_random.py                       |  2 +-
 tests/unittests/test_handler/test_handler_seed_random.py | 14 ++++++++------
 3 files changed, 10 insertions(+), 7 deletions(-)

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index a80a5d5f..6da276b5 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -85,6 +85,7 @@
    unless it is already a file (LP: #1543025).
  - Enable password changing via a hashed string [Alex Sirbu]
  - Added BigStep datasource [Alex Sirbu]
+ - No longer run pollinate in seed_random (LP: #1554152)
 
 0.7.6:
  - open 0.7.6
diff --git a/cloudinit/config/cc_seed_random.py b/cloudinit/config/cc_seed_random.py
index 3288a853..1b011216 100644
--- a/cloudinit/config/cc_seed_random.py
+++ b/cloudinit/config/cc_seed_random.py
@@ -83,7 +83,7 @@ def handle(name, cfg, cloud, log, _args):
                   len(seed_data), seed_path)
         util.append_file(seed_path, seed_data)
 
-    command = mycfg.get('command', ['pollinate', '-q'])
+    command = mycfg.get('command', None)
     req = mycfg.get('command_required', False)
     try:
         env = os.environ.copy()
diff --git a/tests/unittests/test_handler/test_handler_seed_random.py b/tests/unittests/test_handler/test_handler_seed_random.py
index 34d11f21..98bc9b81 100644
--- a/tests/unittests/test_handler/test_handler_seed_random.py
+++ b/tests/unittests/test_handler/test_handler_seed_random.py
@@ -170,28 +170,30 @@ class TestRandomSeed(t_help.TestCase):
         contents = util.load_file(self._seed_file)
         self.assertEquals('tiny-tim-was-here-so-was-josh', contents)
 
-    def test_seed_command_not_provided_pollinate_available(self):
+    def test_seed_command_provided_and_available(self):
         c = self._get_cloud('ubuntu', {})
         self.whichdata = {'pollinate': '/usr/bin/pollinate'}
-        cc_seed_random.handle('test', {}, c, LOG, [])
+        cfg = {'random_seed': {'command': ['pollinate', '-q']}}
+        cc_seed_random.handle('test', cfg, c, LOG, [])
 
         subp_args = [f['args'] for f in self.subp_called]
         self.assertIn(['pollinate', '-q'], subp_args)
 
-    def test_seed_command_not_provided_pollinate_not_available(self):
+    def test_seed_command_not_provided(self):
         c = self._get_cloud('ubuntu', {})
         self.whichdata = {}
         cc_seed_random.handle('test', {}, c, LOG, [])
 
         # subp should not have been called as which would say not available
-        self.assertEquals(self.subp_called, list())
+        self.assertFalse(self.subp_called)
 
     def test_unavailable_seed_command_and_required_raises_error(self):
         c = self._get_cloud('ubuntu', {})
         self.whichdata = {}
+        cfg = {'random_seed': {'command': ['THIS_NO_COMMAND'],
+                               'command_required': True}}
         self.assertRaises(ValueError, cc_seed_random.handle,
-                          'test', {'random_seed': {'command_required': True}},
-                          c, LOG, [])
+                          'test', cfg, c, LOG, [])
 
     def test_seed_command_and_required(self):
         c = self._get_cloud('ubuntu', {})
-- 
cgit v1.2.3


From b839ad32b9bf4541583ecbe68a0bd5dd9f12345a Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 10 Mar 2016 12:32:46 -0500
Subject: dmi data: fix failure of reading dmi data for unset dmi values

it is not uncommon to find dmi data in /sys full of 'ff'. utf-8
decoding of those would fail, causing warning and stacktrace.

Return '.' instead of \xff. This maps to what dmidecode would return

$ dmidecode --string system-product-name
.................................
---
 ChangeLog                    |  1 +
 cloudinit/util.py            | 13 ++++++++++---
 tests/unittests/test_util.py |  9 +++++++++
 3 files changed, 20 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/ChangeLog b/ChangeLog
index da1ca9ee..ebaacf6a 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -88,6 +88,7 @@
  - No longer run pollinate in seed_random (LP: #1554152)
  - groups: add defalt user to 'lxd' group.  Create groups listed
    for a user if they do not exist. (LP: #1539317)
+ - dmi data: fix failure of reading dmi data for unset dmi values
 
 0.7.6:
  - open 0.7.6
diff --git a/cloudinit/util.py b/cloudinit/util.py
index e7407ea4..1d50edc9 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2140,13 +2140,20 @@ def _read_dmi_syspath(key):
             LOG.debug("did not find %s", dmi_key_path)
             return None
 
-        key_data = load_file(dmi_key_path)
+        key_data = load_file(dmi_key_path, decode=False)
         if not key_data:
             LOG.debug("%s did not return any data", dmi_key_path)
             return None
 
-        LOG.debug("dmi data %s returned %s", dmi_key_path, key_data)
-        return key_data.strip()
+        # in the event that this is all \xff and a carriage return
+        # then return '.' in its place.
+        if key_data == b'\xff' * (len(key_data) - 1) + b'\n':
+            key_data = b'.' * (len(key_data) - 1) + b'\n'
+
+        str_data = key_data.decode('utf8').strip()
+
+        LOG.debug("dmi data %s returned %s", dmi_key_path, str_data)
+        return str_data
 
     except Exception:
         logexc(LOG, "failed read of %s", dmi_key_path)
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 95990165..542e4075 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -385,6 +385,15 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
         self.patch_mapping({})
         self.assertEqual(None, util.read_dmi_data('expect-fail'))
 
+    def test_dots_returned_instead_of_foxfox(self):
+        my_len = 32
+        dmi_value = b'\xff' * my_len + b'\n'
+        expected = '.' * my_len
+        dmi_key = 'system-product-name'
+        sysfs_key = 'product_name'
+        self._create_sysfs_file(sysfs_key, dmi_value)
+        self.assertEqual(expected, util.read_dmi_data(dmi_key))
+
 
 class TestMultiLog(helpers.FilesystemMockingTestCase):
 
-- 
cgit v1.2.3


From be38478cd8e11b0e29c70bb881a676628e9f74d5 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Thu, 10 Mar 2016 12:47:55 -0500
Subject: improve comment

---
 cloudinit/util.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 1d50edc9..1a517c79 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2145,13 +2145,12 @@ def _read_dmi_syspath(key):
             LOG.debug("%s did not return any data", dmi_key_path)
             return None
 
-        # in the event that this is all \xff and a carriage return
-        # then return '.' in its place.
+        # uninitialized dmi values show as all \xff and /sys appends a '\n'. 
+        # in that event, return a string of '.' in the same length.
         if key_data == b'\xff' * (len(key_data) - 1) + b'\n':
             key_data = b'.' * (len(key_data) - 1) + b'\n'
 
         str_data = key_data.decode('utf8').strip()
-
         LOG.debug("dmi data %s returned %s", dmi_key_path, str_data)
         return str_data
 
-- 
cgit v1.2.3


From 03f80fa62eb85270a7a96850c5e689a1c4bc0049 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 14 Mar 2016 09:21:02 -0400
Subject: change return value for dmi data of all \xff to be ""

Previously we returned a string of "." the same length as the dmi field.
That seems confusing to the user as "." would seem like a valid response
when in fact this value should not be considered valid.

So now, in this case, return empty string.
---
 cloudinit/util.py            | 7 +++++--
 tests/unittests/test_util.py | 2 +-
 2 files changed, 6 insertions(+), 3 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index 1a517c79..caae17ce 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2148,7 +2148,7 @@ def _read_dmi_syspath(key):
         # uninitialized dmi values show as all \xff and /sys appends a '\n'. 
         # in that event, return a string of '.' in the same length.
         if key_data == b'\xff' * (len(key_data) - 1) + b'\n':
-            key_data = b'.' * (len(key_data) - 1) + b'\n'
+            key_data = b""
 
         str_data = key_data.decode('utf8').strip()
         LOG.debug("dmi data %s returned %s", dmi_key_path, str_data)
@@ -2193,7 +2193,10 @@ def read_dmi_data(key):
 
     dmidecode_path = which('dmidecode')
     if dmidecode_path:
-        return _call_dmidecode(key, dmidecode_path)
+        ret = _call_dmidecode(key, dmidecode_path)
+        if ret is not None and ret.replace(".", "") == "":
+            return ""
+        return ret
 
     LOG.warn("did not find either path %s or dmidecode command",
              DMI_SYS_PATH)
diff --git a/tests/unittests/test_util.py b/tests/unittests/test_util.py
index 542e4075..bdee9719 100644
--- a/tests/unittests/test_util.py
+++ b/tests/unittests/test_util.py
@@ -388,7 +388,7 @@ class TestReadDMIData(helpers.FilesystemMockingTestCase):
     def test_dots_returned_instead_of_foxfox(self):
         my_len = 32
         dmi_value = b'\xff' * my_len + b'\n'
-        expected = '.' * my_len
+        expected = ""
         dmi_key = 'system-product-name'
         sysfs_key = 'product_name'
         self._create_sysfs_file(sysfs_key, dmi_value)
-- 
cgit v1.2.3


From f8fe3182ac5e6b7b3b4a81e034e87bfd1327f82b Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 14 Mar 2016 09:25:50 -0400
Subject: change where we handle the translation

---
 cloudinit/util.py | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index caae17ce..f84f120e 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2168,6 +2168,8 @@ def _call_dmidecode(key, dmidecode_path):
         cmd = [dmidecode_path, "--string", key]
         (result, _err) = subp(cmd)
         LOG.debug("dmidecode returned '%s' for '%s'", result, key)
+        if result.replace(".", "") == "":
+            return ""
         return result
     except (IOError, OSError) as _err:
         LOG.debug('failed dmidecode cmd: %s\n%s', cmd, _err.message)
@@ -2193,10 +2195,7 @@ def read_dmi_data(key):
 
     dmidecode_path = which('dmidecode')
     if dmidecode_path:
-        ret = _call_dmidecode(key, dmidecode_path)
-        if ret is not None and ret.replace(".", "") == "":
-            return ""
-        return ret
+        return _call_dmidecode(key, dmidecode_path)
 
     LOG.warn("did not find either path %s or dmidecode command",
              DMI_SYS_PATH)
-- 
cgit v1.2.3


From 001057f01e698c3ca0c078d9535f05fdebec2d80 Mon Sep 17 00:00:00 2001
From: Scott Moser <smoser@ubuntu.com>
Date: Mon, 14 Mar 2016 09:34:46 -0400
Subject: strip return of dmidecode and do so before checking for all "."

---
 cloudinit/util.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'cloudinit')

diff --git a/cloudinit/util.py b/cloudinit/util.py
index f84f120e..f9e37a79 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -2168,6 +2168,7 @@ def _call_dmidecode(key, dmidecode_path):
         cmd = [dmidecode_path, "--string", key]
         (result, _err) = subp(cmd)
         LOG.debug("dmidecode returned '%s' for '%s'", result, key)
+        result = result.strip()
         if result.replace(".", "") == "":
             return ""
         return result
-- 
cgit v1.2.3