Diffstat (limited to 'cloudinit')
-rw-r--r--  cloudinit/config/cc_snappy.py     4
-rw-r--r--  cloudinit/distros/__init__.py     1
-rw-r--r--  cloudinit/handlers/__init__.py   11
-rw-r--r--  cloudinit/user_data.py            9
-rw-r--r--  cloudinit/util.py                 8
5 files changed, 23 insertions, 10 deletions
diff --git a/cloudinit/config/cc_snappy.py b/cloudinit/config/cc_snappy.py
index 6a7ae09b..bfe76558 100644
--- a/cloudinit/config/cc_snappy.py
+++ b/cloudinit/config/cc_snappy.py
@@ -72,7 +72,7 @@ def parse_filename(fname):
name = fname_noext.partition("_")[0]
shortname = name.partition(".")[0]
return(name, shortname, fname_noext)
-
+
def get_fs_package_ops(fspath):
if not fspath:
@@ -98,7 +98,7 @@ def makeop(op, name, config=None, path=None, cfgfile=None):
def get_package_config(configs, name):
# load the package's config from the configs dict.
- # prefer full-name entry (config-example.canonical)
+ # prefer full-name entry (config-example.canonical)
# over short name entry (config-example)
if name in configs:
return configs[name]
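
The context in this hunk splits a snap filename into its full name, short name, and extension-stripped basename; the get_package_config comment relies on that split to prefer the full-name config entry (config-example.canonical) over the short-name entry (config-example). A minimal standalone sketch of that logic follows, assuming fname_noext is simply the basename with its extension removed (that step sits outside the hunk):

    # Sketch of the name-splitting shown above; parse_filename_sketch and the
    # extension-stripping step are illustrative, not the module's own code.
    import os

    def parse_filename_sketch(fname):
        fname_noext = os.path.basename(fname).rsplit(".", 1)[0]  # assumption
        name = fname_noext.partition("_")[0]
        shortname = name.partition(".")[0]
        return (name, shortname, fname_noext)

    # parse_filename_sketch("config-example.canonical_0.1_all.snap")
    #   -> ("config-example.canonical", "config-example",
    #       "config-example.canonical_0.1_all")
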
diff --git a/cloudinit/distros/__init__.py b/cloudinit/distros/__init__.py
index c699a65a..05721922 100644
--- a/cloudinit/distros/__init__.py
+++ b/cloudinit/distros/__init__.py
@@ -322,6 +322,7 @@ class Distro(object):
"gecos": '--comment',
"homedir": '--home',
"primary_group": '--gid',
+ "uid": '--uid',
"groups": '--groups',
"passwd": '--password',
"shell": '--shell',
diff --git a/cloudinit/handlers/__init__.py b/cloudinit/handlers/__init__.py
index d62fcd19..53d5604a 100644
--- a/cloudinit/handlers/__init__.py
+++ b/cloudinit/handlers/__init__.py
@@ -170,12 +170,12 @@ def _extract_first_or_bytes(blob, size):
start = blob.split("\n", 1)[0]
else:
# We want to avoid decoding the whole blob (it might be huge)
- # By taking 4*size bytes we have a guarantee to decode size utf8 chars
- start = blob[:4*size].decode(errors='ignore').split("\n", 1)[0]
+ # By taking 4*size bytes we guarantee to decode size utf8 chars
+ start = blob[:4 * size].decode(errors='ignore').split("\n", 1)[0]
if len(start) >= size:
start = start[:size]
except UnicodeDecodeError:
- # Bytes array doesn't contain a text object -- return chunk of raw bytes
+ # Bytes array doesn't contain text so return chunk of raw bytes
start = blob[0:size]
return start
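
The reworded comment in this hunk rests on a UTF-8 property: a code point is at most 4 bytes, so decoding the first 4*size bytes of a byte blob yields at least size characters whenever that prefix is valid text. A standalone sketch (not the cloud-init helper itself) demonstrating the guarantee:

    # Standalone sketch of the 4*size guarantee; errors='ignore' also drops a
    # code point that happens to be split at the 4*size boundary.
    def first_or_bytes(blob, size):
        try:
            if isinstance(blob, str):
                start = blob.split("\n", 1)[0]
            else:
                start = blob[:4 * size].decode(errors='ignore').split("\n", 1)[0]
            if len(start) >= size:
                start = start[:size]
        except UnicodeDecodeError:
            start = blob[0:size]
        return start

    # 2-byte UTF-8 chars: the first 4*10 bytes decode to 20 chars, trimmed to 10.
    blob = ("é" * 100).encode("utf-8")       # 200 bytes, 2 bytes per char
    assert first_or_bytes(blob, 10) == "é" * 10
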
@@ -263,7 +263,10 @@ def fixup_handler(mod, def_freq=PER_INSTANCE):
def type_from_starts_with(payload, default=None):
- payload_lc = payload.lower()
+ try:
+ payload_lc = util.decode_binary(payload).lower()
+ except UnicodeDecodeError:
+ return default
payload_lc = payload_lc.lstrip()
for text in INCLUSION_SRCH:
if payload_lc.startswith(text):
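
The second hunk lets type_from_starts_with accept bytes as well as str: the payload is decoded before the prefix match against the str markers in INCLUSION_SRCH, and undecodable binary falls back to the default type. A hedged sketch of that behaviour, where decode_to_text stands in for util.decode_binary (assumed here to be essentially a UTF-8 decode) and the marker list is an illustrative subset:

    # Sketch only: the INCLUSION_SRCH subset and decode_to_text are assumptions.
    INCLUSION_SRCH = ['#include', '#cloud-config', '#!']

    def decode_to_text(blob):
        return blob.decode('utf-8') if isinstance(blob, bytes) else blob

    def type_from_starts_with_sketch(payload, default=None):
        try:
            payload_lc = decode_to_text(payload).lower()
        except UnicodeDecodeError:
            return default                   # undecodable binary payload
        payload_lc = payload_lc.lstrip()
        for text in INCLUSION_SRCH:
            if payload_lc.startswith(text):
                return text                  # the real code maps this to a MIME type
        return default

    # type_from_starts_with_sketch(b"#cloud-config\n...", "text/plain")
    #   -> "#cloud-config"
    # type_from_starts_with_sketch(b"\x1f\x8b\x08...", "text/plain")
    #   -> "text/plain"   (gzip bytes do not decode as UTF-8)
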
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index eb3c7336..f7c5787c 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -49,6 +49,7 @@ INCLUDE_TYPES = ['text/x-include-url', 'text/x-include-once-url']
ARCHIVE_TYPES = ["text/cloud-config-archive"]
UNDEF_TYPE = "text/plain"
ARCHIVE_UNDEF_TYPE = "text/cloud-config"
+ARCHIVE_UNDEF_BINARY_TYPE = "application/octet-stream"
# This seems to hit most of the gzip possible content types.
DECOMP_TYPES = [
@@ -265,11 +266,15 @@ class UserDataProcessor(object):
content = ent.get('content', '')
mtype = ent.get('type')
if not mtype:
- mtype = handlers.type_from_starts_with(content,
- ARCHIVE_UNDEF_TYPE)
+ default = ARCHIVE_UNDEF_TYPE
+ if isinstance(content, six.binary_type):
+ default = ARCHIVE_UNDEF_BINARY_TYPE
+ mtype = handlers.type_from_starts_with(content, default)
maintype, subtype = mtype.split('/', 1)
if maintype == "text":
+ if isinstance(content, six.binary_type):
+ content = content.decode()
msg = MIMEText(content, _subtype=subtype)
else:
msg = MIMEBase(maintype, subtype)
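
In this hunk, archive entries without an explicit type get a binary-appropriate default (application/octet-stream) when their content is bytes, and bytes content is decoded before being handed to MIMEText, which requires str. A minimal sketch of that branching, with a plain isinstance check in place of six.binary_type, a stub guess_type in place of handlers.type_from_starts_with, and a set_payload call added on the non-text branch only to keep the sketch self-contained:

    from email.mime.base import MIMEBase
    from email.mime.text import MIMEText

    ARCHIVE_UNDEF_TYPE = "text/cloud-config"
    ARCHIVE_UNDEF_BINARY_TYPE = "application/octet-stream"

    def wrap_entry(content, mtype=None, guess_type=lambda content, default: default):
        if not mtype:
            default = ARCHIVE_UNDEF_TYPE
            if isinstance(content, bytes):        # six.binary_type on Python 3
                default = ARCHIVE_UNDEF_BINARY_TYPE
            mtype = guess_type(content, default)
        maintype, subtype = mtype.split('/', 1)
        if maintype == "text":
            if isinstance(content, bytes):
                content = content.decode()        # MIMEText needs str
            return MIMEText(content, _subtype=subtype)
        msg = MIMEBase(maintype, subtype)
        msg.set_payload(content)                  # added for the sketch
        return msg

    # wrap_entry("#cloud-config\n")   -> text/cloud-config part
    # wrap_entry(b"\x1f\x8b\x08...")  -> application/octet-stream part
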
diff --git a/cloudinit/util.py b/cloudinit/util.py
index 971c1c2d..cae57770 100644
--- a/cloudinit/util.py
+++ b/cloudinit/util.py
@@ -121,8 +121,12 @@ def fully_decoded_payload(part):
if (six.PY3 and
part.get_content_maintype() == 'text' and
isinstance(cte_payload, bytes)):
- charset = part.get_charset() or 'utf-8'
- return cte_payload.decode(charset, errors='surrogateescape')
+ charset = part.get_charset()
+ if charset and charset.input_codec:
+ encoding = charset.input_codec
+ else:
+ encoding = 'utf-8'
+ return cte_payload.decode(encoding, errors='surrogateescape')
return cte_payload
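
The util.py change stops handing whatever part.get_charset() returns straight to bytes.decode(): get_charset() yields an email.charset.Charset object (or None), not a codec name, so the decode now uses the charset's input_codec when one is defined and falls back to utf-8 otherwise. A standalone sketch of that resolution:

    from email.charset import Charset

    def decode_payload(cte_payload, charset=None):
        # charset is an email.charset.Charset or None; its input_codec is the
        # actual codec name (and can itself be None, e.g. for us-ascii).
        if charset and charset.input_codec:
            encoding = charset.input_codec
        else:
            encoding = 'utf-8'
        return cte_payload.decode(encoding, errors='surrogateescape')

    # decode_payload(b'caf\xc3\xa9')                            -> 'café'
    # decode_payload('café'.encode('iso-8859-1'),
    #                Charset('iso-8859-1'))                     -> 'café'
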