author     James Falcon <james.falcon@canonical.com>    2021-12-15 20:16:38 -0600
committer  GitHub <noreply@github.com>                  2021-12-15 19:16:38 -0700
commit     bae9b11da9ed7dd0b16fe5adeaf4774b7cc628cf (patch)
tree       1fbb3269fc87e39832e3286ef42eefd2b23fcd44 /cloudinit/user_data.py
parent     2bcf4fa972fde686c2e3141c58e640640b44dd00 (diff)
Adopt Black and isort (SC-700) (#1157)
Applied Black and isort, fixed any linting issues, updated tox.ini and CI.
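
The change is mechanical: isort merges and alphabetizes the imports (visible at the top of the diff, where two "from cloudinit import ..." lines become one), while Black normalizes string quotes and rewraps anything past the configured line length. As a rough, hand-written sketch of the shapes the two tools produce, with stand-in definitions so the snippet runs on its own (they are not cloud-init's API):

    # Stand-in for cloudinit.url_helper.read_file_or_url, purely so this
    # sketch is self-contained and runnable.
    def read_file_or_url(url, timeout, retries, ssl_details):
        return (url, timeout, retries, ssl_details)

    include_url = "http://example.invalid/user-data"  # hypothetical URL
    ssl_details = None

    # Black explodes a call one argument per line once it passes the line
    # limit (79 columns, judging by the wraps in this diff) and leaves a
    # trailing comma so future additions touch a single line:
    #   resp = read_file_or_url(include_url, timeout=5, retries=10,
    #                           ssl_details=ssl_details)
    # becomes:
    resp = read_file_or_url(
        include_url,
        timeout=5,
        retries=10,
        ssl_details=ssl_details,
    )

    # Single quotes are normalized to double quotes:
    CONTENT_TYPE = "Content-Type"  # was 'Content-Type'

    # PEP 8 spacing around a slice whose lower bound is an expression:
    line = "#include-once http://example.invalid/part"
    line = line[len("#include-once") :].lstrip()  # was [len("#include-once"):]
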
Diffstat (limited to 'cloudinit/user_data.py')
-rw-r--r--  cloudinit/user_data.py | 121
1 file changed, 67 insertions(+), 54 deletions(-)
diff --git a/cloudinit/user_data.py b/cloudinit/user_data.py
index 1317e063..05c66741 100644
--- a/cloudinit/user_data.py
+++ b/cloudinit/user_data.py
@@ -14,11 +14,10 @@ from email.mime.multipart import MIMEMultipart
from email.mime.nonmultipart import MIMENonMultipart
from email.mime.text import MIMEText
-from cloudinit import handlers
+from cloudinit import features, handlers
from cloudinit import log as logging
-from cloudinit import features
-from cloudinit.url_helper import read_file_or_url, UrlError
from cloudinit import util
+from cloudinit.url_helper import UrlError, read_file_or_url
LOG = logging.getLogger(__name__)
@@ -28,11 +27,11 @@ PART_FN_TPL = handlers.PART_FN_TPL
OCTET_TYPE = handlers.OCTET_TYPE
# Saves typing errors
-CONTENT_TYPE = 'Content-Type'
+CONTENT_TYPE = "Content-Type"
# Various special content types that cause special actions
TYPE_NEEDED = ["text/plain", "text/x-not-multipart"]
-INCLUDE_TYPES = ['text/x-include-url', 'text/x-include-once-url']
+INCLUDE_TYPES = ["text/x-include-url", "text/x-include-once-url"]
ARCHIVE_TYPES = ["text/cloud-config-archive"]
UNDEF_TYPE = "text/plain"
ARCHIVE_UNDEF_TYPE = "text/cloud-config"
@@ -40,18 +39,18 @@ ARCHIVE_UNDEF_BINARY_TYPE = "application/octet-stream"
# This seems to hit most of the possible gzip content types.
DECOMP_TYPES = [
- 'application/gzip',
- 'application/gzip-compressed',
- 'application/gzipped',
- 'application/x-compress',
- 'application/x-compressed',
- 'application/x-gunzip',
- 'application/x-gzip',
- 'application/x-gzip-compressed',
+ "application/gzip",
+ "application/gzip-compressed",
+ "application/gzipped",
+ "application/x-compress",
+ "application/x-compressed",
+ "application/x-gunzip",
+ "application/x-gzip",
+ "application/x-gzip-compressed",
]
# Msg header used to track attachments
-ATTACHMENT_FIELD = 'Number-Attachments'
+ATTACHMENT_FIELD = "Number-Attachments"
# Only the following content types can have their launch index examined
# in their payload; every other content type can still provide a header
@@ -64,9 +63,8 @@ def _replace_header(msg, key, value):
def _set_filename(msg, filename):
- del msg['Content-Disposition']
- msg.add_header('Content-Disposition',
- 'attachment', filename=str(filename))
+ del msg["Content-Disposition"]
+ msg.add_header("Content-Disposition", "attachment", filename=str(filename))
def _handle_error(error_message, source_exception=None):
@@ -91,7 +89,6 @@ class UserDataProcessor(object):
return accumulating_msg
def _process_msg(self, base_msg, append_msg):
-
def find_ctype(payload):
return handlers.type_from_starts_with(payload)
@@ -118,7 +115,9 @@ class UserDataProcessor(object):
error_message = (
"Failed decompressing payload from {} of"
" length {} due to: {}".format(
- ctype_orig, len(payload), e))
+ ctype_orig, len(payload), e
+ )
+ )
_handle_error(error_message, e)
continue
@@ -130,7 +129,7 @@ class UserDataProcessor(object):
# to check the true MIME type for x-shellscript type since all
# shellscript payloads must have a #! header. The other MIME types
# that cloud-init supports do not have the same guarantee.
- if ctype_orig in TYPE_NEEDED + ['text/x-shellscript']:
+ if ctype_orig in TYPE_NEEDED + ["text/x-shellscript"]:
ctype = find_ctype(payload)
if ctype is None:
ctype = ctype_orig
@@ -148,7 +147,7 @@ class UserDataProcessor(object):
# after decoding and decompression.
if part.get_filename():
_set_filename(n_part, part.get_filename())
- for h in ('Launch-Index',):
+ for h in ("Launch-Index",):
if h in part:
_replace_header(n_part, h, str(part[h]))
part = n_part
@@ -171,7 +170,7 @@ class UserDataProcessor(object):
self._attach_part(append_msg, part)
def _attach_launch_index(self, msg):
- header_idx = msg.get('Launch-Index', None)
+ header_idx = msg.get("Launch-Index", None)
payload_idx = None
if msg.get_content_type() in EXAMINE_FOR_LAUNCH_INDEX:
try:
@@ -179,7 +178,7 @@ class UserDataProcessor(object):
# that might affect the final header
payload = util.load_yaml(msg.get_payload(decode=True))
if payload:
- payload_idx = payload.get('launch-index')
+ payload_idx = payload.get("launch-index")
except Exception:
pass
# Header overrides contents, for now (?) or the other way around?
@@ -190,14 +189,15 @@ class UserDataProcessor(object):
payload_idx = header_idx
if payload_idx is not None:
try:
- msg.add_header('Launch-Index', str(int(payload_idx)))
+ msg.add_header("Launch-Index", str(int(payload_idx)))
except (ValueError, TypeError):
pass
def _get_include_once_filename(self, entry):
- entry_fn = util.hash_blob(entry, 'md5', 64)
- return os.path.join(self.paths.get_ipath_cur('data'),
- 'urlcache', entry_fn)
+ entry_fn = util.hash_blob(entry, "md5", 64)
+ return os.path.join(
+ self.paths.get_ipath_cur("data"), "urlcache", entry_fn
+ )
def _process_before_attach(self, msg, attached_id):
if not msg.get_filename():
@@ -212,13 +212,13 @@ class UserDataProcessor(object):
for line in content.splitlines():
lc_line = line.lower()
if lc_line.startswith("#include-once"):
- line = line[len("#include-once"):].lstrip()
+ line = line[len("#include-once") :].lstrip()
# Every following include will now
# not be refetched.... but will be
# re-read from a local urlcache (if it worked)
include_once_on = True
elif lc_line.startswith("#include"):
- line = line[len("#include"):].lstrip()
+ line = line[len("#include") :].lstrip()
# Disable the include once if it was on
# if it wasn't, then this has no effect.
include_once_on = False
@@ -236,29 +236,37 @@ class UserDataProcessor(object):
content = util.load_file(include_once_fn)
else:
try:
- resp = read_file_or_url(include_url, timeout=5, retries=10,
- ssl_details=self.ssl_details)
+ resp = read_file_or_url(
+ include_url,
+ timeout=5,
+ retries=10,
+ ssl_details=self.ssl_details,
+ )
if include_once_on and resp.ok():
- util.write_file(include_once_fn, resp.contents,
- mode=0o600)
+ util.write_file(
+ include_once_fn, resp.contents, mode=0o600
+ )
if resp.ok():
content = resp.contents
else:
error_message = (
"Fetching from {} resulted in"
" a invalid http code of {}".format(
- include_url, resp.code))
+ include_url, resp.code
+ )
+ )
_handle_error(error_message)
except UrlError as urle:
message = str(urle)
# Older versions of requests.exceptions.HTTPError may not
# include the errant url. Append it for clarity in logs.
if include_url not in message:
- message += ' for url: {0}'.format(include_url)
+ message += " for url: {0}".format(include_url)
_handle_error(message, urle)
except IOError as ioe:
error_message = "Fetching from {} resulted in {}".format(
- include_url, ioe)
+ include_url, ioe
+ )
_handle_error(error_message, ioe)
if content is not None:
@@ -275,20 +283,20 @@ class UserDataProcessor(object):
# or
# scalar(payload)
if isinstance(ent, str):
- ent = {'content': ent}
+ ent = {"content": ent}
if not isinstance(ent, (dict)):
# TODO(harlowja) raise?
continue
- content = ent.get('content', '')
- mtype = ent.get('type')
+ content = ent.get("content", "")
+ mtype = ent.get("type")
if not mtype:
default = ARCHIVE_UNDEF_TYPE
if isinstance(content, bytes):
default = ARCHIVE_UNDEF_BINARY_TYPE
mtype = handlers.type_from_starts_with(content, default)
- maintype, subtype = mtype.split('/', 1)
+ maintype, subtype = mtype.split("/", 1)
if maintype == "text":
if isinstance(content, bytes):
content = content.decode()
@@ -297,16 +305,21 @@ class UserDataProcessor(object):
msg = MIMEBase(maintype, subtype)
msg.set_payload(content)
- if 'filename' in ent:
- _set_filename(msg, ent['filename'])
- if 'launch-index' in ent:
- msg.add_header('Launch-Index', str(ent['launch-index']))
+ if "filename" in ent:
+ _set_filename(msg, ent["filename"])
+ if "launch-index" in ent:
+ msg.add_header("Launch-Index", str(ent["launch-index"]))
for header in list(ent.keys()):
- if header.lower() in ('content', 'filename', 'type',
- 'launch-index', 'content-disposition',
- ATTACHMENT_FIELD.lower(),
- CONTENT_TYPE.lower()):
+ if header.lower() in (
+ "content",
+ "filename",
+ "type",
+ "launch-index",
+ "content-disposition",
+ ATTACHMENT_FIELD.lower(),
+ CONTENT_TYPE.lower(),
+ ):
continue
msg.add_header(header, ent[header])
@@ -318,7 +331,7 @@ class UserDataProcessor(object):
at its 'Number-Attachments' header.
"""
if ATTACHMENT_FIELD not in outer_msg:
- outer_msg[ATTACHMENT_FIELD] = '0'
+ outer_msg[ATTACHMENT_FIELD] = "0"
if new_count is not None:
_replace_header(outer_msg, ATTACHMENT_FIELD, str(new_count))
@@ -344,8 +357,8 @@ class UserDataProcessor(object):
def is_skippable(part):
# multipart/* are just containers
- part_maintype = part.get_content_maintype() or ''
- if part_maintype.lower() == 'multipart':
+ part_maintype = part.get_content_maintype() or ""
+ if part_maintype.lower() == "multipart":
return True
return False
@@ -355,7 +368,7 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
"""convert a string (more likely bytes) or a message into
a mime message."""
if not raw_data:
- raw_data = b''
+ raw_data = b""
def create_binmsg(data, content_type):
maintype, subtype = content_type.split("/", 1)
@@ -364,12 +377,12 @@ def convert_string(raw_data, content_type=NOT_MULTIPART_TYPE):
return msg
if isinstance(raw_data, str):
- bdata = raw_data.encode('utf-8')
+ bdata = raw_data.encode("utf-8")
else:
bdata = raw_data
bdata = util.decomp_gzip(bdata, decode=False)
if b"mime-version:" in bdata[0:4096].lower():
- msg = util.message_from_string(bdata.decode('utf-8'))
+ msg = util.message_from_string(bdata.decode("utf-8"))
else:
msg = create_binmsg(bdata, content_type)
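
For context on the code being reformatted: user data may pull in further parts with #include / #include-once directives, and the include-once variant caches each fetched URL under a filename derived from an md5 hash of the URL, so later passes re-read the local copy instead of refetching. A minimal standalone sketch of that caching idea (the cache directory, helper names, and fetch callback below are stand-ins, not cloud-init's API):

    import hashlib
    import os

    CACHE_DIR = "/tmp/urlcache"  # stand-in for <instance data dir>/urlcache

    def include_once_filename(url):
        # Mirrors _get_include_once_filename: hash the URL text and use the
        # (truncated) hex digest as the cache filename.
        digest = hashlib.md5(url.encode()).hexdigest()[:64]
        return os.path.join(CACHE_DIR, digest)

    def fetch_once(url, fetch):
        cache_fn = include_once_filename(url)
        if os.path.isfile(cache_fn):
            # Seen before: re-read from the local urlcache, no refetch.
            with open(cache_fn, "rb") as f:
                return f.read()
        content = fetch(url)  # first time: actually go to the network
        os.makedirs(CACHE_DIR, exist_ok=True)
        with open(cache_fn, "wb") as f:  # cloud-init writes this with mode 0o600
            f.write(content)
        return content

    # Usage sketch: fetch_once(u, lambda u: urllib.request.urlopen(u).read())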