author     Scott Moser <smoser@ubuntu.com>    2011-07-26 10:22:23 -0400
committer  Scott Moser <smoser@ubuntu.com>    2011-07-26 10:22:23 -0400
commit     a6dce5ac548de073918d679503f447d265847066 (patch)
tree       48947d67bf07116016b4c9d3fe4c97b65a5a7316
parent     690086473dccc7489dcb21ccade9c057762e35a3 (diff)
make 'include-once' really "download source once per-instance"
Marc's implementation would only ever process the include-once urls a single time. This changes that so they are processed on every boot, with the second and subsequent reads coming from a file on disk rather than from the url. You can then use expiring or one-time-use URLs in an include-once and everything functions as if the content were available every time. The cached file is readable by root only.
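In outline, the change turns the include step into download-or-read-cache. Below is a minimal standalone sketch of the same pattern; the fetch_once name, the hard-coded directory, and the plain urllib/os calls are simplifications for illustration (the real code resolves the directory via get_ipath_cur("data") and writes through util.write_file):

    import base64
    import os
    import urllib

    # assumed per-instance location; cloud-init derives this from
    # get_ipath_cur("data") rather than hard-coding it
    CACHE_DIR = "/var/lib/cloud/instance/data/urlcache"

    def fetch_once(url):
        # base64-encode the url to derive a unique cache key, as the patch does
        key = base64.encodestring(url).strip()
        cache_file = os.path.join(CACHE_DIR, key)
        if os.path.isfile(cache_file):
            # second and later boots read the cached copy instead of the url
            with open(cache_file, "r") as fp:
                return fp.read()
        content = urllib.urlopen(url).read()
        if not os.path.isdir(CACHE_DIR):
            os.makedirs(CACHE_DIR)
        # cache is readable by root only, since the url contents may be secret
        fd = os.open(cache_file, os.O_WRONLY | os.O_CREAT, 0o600)
        with os.fdopen(fd, "w") as fp:
            fp.write(content)
        return content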
-rw-r--r--  cloudinit/UserDataHandler.py  20
-rw-r--r--  doc/userdata.txt               4
2 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/cloudinit/UserDataHandler.py b/cloudinit/UserDataHandler.py
index 4fd6ef28..4ac0e2cd 100644
--- a/cloudinit/UserDataHandler.py
+++ b/cloudinit/UserDataHandler.py
@@ -22,6 +22,7 @@ from email.mime.text import MIMEText
 from email.mime.base import MIMEBase
 from email import encoders
 import yaml
+from cloudinit import util, get_ipath_cur
 
 starts_with_mappings={
     '#include' : 'text/x-include-url',
@@ -61,16 +62,21 @@ def do_include(str,parts):
         elif line.startswith("#include"):
             line = line[len("#include"):].lstrip()
         if line.startswith("#"): continue
-        if includeonce == True:
-            uniquestring = base64.encodestring(line).strip('\n')
-            includeonce_filename = "/var/lib/cloud/instance/.includeonce.%s" % uniquestring
-            if os.path.isfile(includeonce_filename): continue
-            includeonce_file = open(includeonce_filename,'w')
-            includeonce_file.close()
+
+        # urls cannot have leading or trailing white space
+        uniquestring = base64.encodestring(line).strip()
+        includeonce_filename = "%s/urlcache/%s" % (get_ipath_cur("data"), uniquestring)
         try:
-            content = urllib.urlopen(line).read()
+            if includeonce and os.path.isfile(includeonce_filename):
+                with open(includeonce_filename, "r") as fp:
+                    content = fp.read()
+            else:
+                content = urllib.urlopen(line).read()
+                if includeonce:
+                    util.write_file(includeonce_filename, content, mode=0600)
         except Exception as e:
             log.debug(traceback.format_exc(e))
+
         process_includes(email.message_from_string(decomp_str(content)),parts)
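Concretely, assuming get_ipath_cur("data") resolves to /var/lib/cloud/instance/data (the usual cloud-init layout) and a hypothetical included url, the cache key is just the stripped base64 of the url:

    >>> import base64
    >>> base64.encodestring("http://example.com/ud").strip()
    'aHR0cDovL2V4YW1wbGUuY29tL3Vk'

so the cached copy would land at /var/lib/cloud/instance/data/urlcache/aHR0cDovL2V4YW1wbGUuY29tL3Vk, written with mode 0600 so only root can read it.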
diff --git a/doc/userdata.txt b/doc/userdata.txt
index 3af1e632..cc691ae6 100644
--- a/doc/userdata.txt
+++ b/doc/userdata.txt
@@ -42,7 +42,9 @@ finds. However, certain types of user-data are handled specially.
    urls, one per line. Each of the URLs will be read, and their content
    will be passed through this same set of rules. Ie, the content
    read from the URL can be gzipped, mime-multi-part, or plain text
-   This file will just be processed once by cloud-init
+   This file will be downloaded only once per instance, and its
+   contents cached for subsequent boots. This allows you to pass in
+   one-time-use or expiring URLs.
 
 * Cloud Config Data
   begins with #cloud-config or Content-Type: text/cloud-config
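For illustration, user-data exercising the new path could be as small as the following (the url is hypothetical; the '#include-once' header is what maps to the text/x-include-once-url part type):

    #include-once
    http://example.com/my-one-time-use-url

On first boot the url is fetched and its content cached under the instance's urlcache directory; on later boots the cached copy is processed instead, so the url may expire or stop resolving without affecting subsequent boots.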