summaryrefslogtreecommitdiff
path: root/cloudinit
diff options
context:
space:
mode:
authorJoshua Harlow <harlowja@yahoo-inc.com>2013-03-19 15:53:16 -0700
committerJoshua Harlow <harlowja@yahoo-inc.com>2013-03-19 15:53:16 -0700
commit20a2d9961697fbd6ef0e74cd3770b6601b141bcd (patch)
treebc6660c6fb2c6b88875d0ff8cec23ff3af00887f /cloudinit
parent5f3aaf5300825a3e586c9369aa4c1d917b448811 (diff)
downloadvyos-cloud-init-20a2d9961697fbd6ef0e74cd3770b6601b141bcd.tar.gz
vyos-cloud-init-20a2d9961697fbd6ef0e74cd3770b6601b141bcd.zip
Move back to using boto for now.
Diffstat (limited to 'cloudinit')
-rw-r--r--cloudinit/ec2_utils.py183
-rw-r--r--cloudinit/sources/DataSourceCloudStack.py7
-rw-r--r--cloudinit/sources/DataSourceEc2.py7
3 files changed, 45 insertions, 152 deletions
diff --git a/cloudinit/ec2_utils.py b/cloudinit/ec2_utils.py
index 71c84206..29393ce1 100644
--- a/cloudinit/ec2_utils.py
+++ b/cloudinit/ec2_utils.py
@@ -16,146 +16,45 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from urlparse import (urlparse, urlunparse)
+import boto.utils as boto_utils
+
+# Versions of boto >= 2.6.0 (and possibly 2.5.2)
+# try to lazily load the metadata backing, which
+# doesn't work so well in cloud-init especially
+# since the metadata is serialized and actions are
+# performed where the metadata server may be blocked
+# (thus the datasource will start failing) resulting
+# in url exceptions when fields that do exist (or
+# would have existed) do not exist due to the blocking
+# that occurred.
+
+
+def _unlazy_dict(mp):
+ if not isinstance(mp, (dict)):
+ return mp
+ # Walk over the keys/values which
+ # forces boto to unlazy itself and
+ # has no effect on dictionaries that
+    # already have their items.
+ for (_k, v) in mp.items():
+ _unlazy_dict(v)
+ return mp
+
+
+def get_instance_userdata(api_version, metadata_address):
+ # Note: boto.utils.get_instance_metadata returns '' for empty string
+ # so the change from non-true to '' is not specifically necessary, but
+ # this way cloud-init will get consistent behavior even if boto changed
+ # in the future to return a None on "no user-data provided".
+ ud = boto_utils.get_instance_userdata(api_version, None, metadata_address)
+ if not ud:
+ ud = ''
+ return ud
+
+
+def get_instance_metadata(api_version, metadata_address):
+ metadata = boto_utils.get_instance_metadata(api_version, metadata_address)
+ if not isinstance(metadata, (dict)):
+ metadata = {}
+ return _unlazy_dict(metadata)
-import json
-import urllib
-
-from cloudinit import log as logging
-from cloudinit import util
-
-LOG = logging.getLogger(__name__)
-
-
-def combine_url(base, add_on):
- base_parsed = list(urlparse(base))
- path = base_parsed[2]
- if path and not path.endswith("/"):
- path += "/"
- path += urllib.quote(str(add_on), safe="/:")
- base_parsed[2] = path
- return urlunparse(base_parsed)
-
-
-# See: http://bit.ly/TyoUQs
-#
-# Since boto metadata reader uses the old urllib which does not
- # support ssl, we need to go ahead and create our own reader which
-# works the same as the boto one (for now).
-class MetadataMaterializer(object):
- def __init__(self, blob, base_url, ssl_details):
- self._blob = blob
- self._md = None
- self._base_url = base_url
- self._ssl_details = ssl_details
-
- def _parse(self, blob):
- leaves = {}
- children = []
- if not blob:
- return (leaves, children)
-
- def has_children(item):
- if item.endswith("/"):
- return True
- else:
- return False
-
- def get_name(item):
- if item.endswith("/"):
- return item.rstrip("/")
- return item
-
- for field in blob.splitlines():
- field = field.strip()
- field_name = get_name(field)
- if not field or not field_name:
- continue
- if has_children(field):
- if field_name not in children:
- children.append(field_name)
- else:
- contents = field.split("=", 1)
- resource = field_name
- if len(contents) > 1:
- # What a PITA...
- (ident, sub_contents) = contents
- checked_ident = util.safe_int(ident)
- if checked_ident is not None:
- resource = "%s/openssh-key" % (checked_ident)
- field_name = sub_contents
- leaves[field_name] = resource
- return (leaves, children)
-
- def materialize(self):
- if self._md is not None:
- return self._md
- self._md = self._materialize(self._blob, self._base_url)
- return self._md
-
- def _fetch_url(self, url):
- response = util.read_file_or_url(url, ssl_details=self._ssl_details)
- return str(response)
-
- def _decode_leaf_blob(self, blob):
- if not blob:
- return blob
- stripped_blob = blob.strip()
- if stripped_blob.startswith("{") and stripped_blob.endswith("}"):
- # Assume and try with json
- try:
- return json.loads(blob)
- except (ValueError, TypeError):
- pass
- if blob.find("\n") != -1:
- return blob.splitlines()
- return blob
-
- def _materialize(self, blob, base_url):
- (leaves, children) = self._parse(blob)
- child_contents = {}
- for c in children:
- child_url = combine_url(base_url, c)
- if not child_url.endswith("/"):
- child_url += "/"
- child_blob = self._fetch_url(child_url)
- child_contents[c] = self._materialize(child_blob, child_url)
- leaf_contents = {}
- for (field, resource) in leaves.items():
- leaf_url = combine_url(base_url, resource)
- leaf_blob = self._fetch_url(leaf_url)
- leaf_contents[field] = self._decode_leaf_blob(leaf_blob)
- joined = {}
- joined.update(child_contents)
- for field in leaf_contents.keys():
- if field in joined:
- LOG.warn("Duplicate key found in results from %s", base_url)
- else:
- joined[field] = leaf_contents[field]
- return joined
-
-
-def get_instance_userdata(version='latest', url='http://169.254.169.254',
- ssl_details=None):
- ud_url = combine_url(url, version)
- ud_url = combine_url(ud_url, 'user-data')
- try:
- response = util.read_file_or_url(ud_url, ssl_details=ssl_details)
- return str(response)
- except Exception:
- util.logexc(LOG, "Failed fetching userdata from url %s", ud_url)
- return None
-
-
-def get_instance_metadata(version='latest', url='http://169.254.169.254',
- ssl_details=None, timeout=5, num_retries=5):
- md_url = combine_url(metadata_address, version)
- md_url = combine_url(md_url, 'meta-data')
- try:
- response = util.read_file_or_url(md_url, ssl_details=ssl_details,
- timeout=timeout, retries=num_retries)
- materializer = MetadataMaterializer(str(response), md_url, ssl_details)
- return materializer.materialize()
- except Exception:
- util.logexc(LOG, "Failed fetching metadata from url %s", md_url)
- return None
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 7fd38982..b4ca6d93 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -102,13 +102,10 @@ class DataSourceCloudStack(sources.DataSource):
return False
start_time = time.time()
md_addr = self.metadata_address
- ssl_details = util.fetch_ssl_details(self.paths)
self.userdata_raw = ec2_utils.get_instance_userdata(self.api_ver,
- md_addr,
- ssl_details)
+ md_addr)
self.metadata = ec2_utils.get_instance_metadata(self.api_ver,
- md_addr,
- ssl_details)
+ md_addr)
LOG.debug("Crawl of metadata service took %s seconds",
int(time.time() - start_time))
return True
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 0d62cf01..bd35c8b0 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -62,13 +62,10 @@ class DataSourceEc2(sources.DataSource):
return False
start_time = time.time()
md_addr = self.metadata_address
- ssl_details = util.fetch_ssl_details(self.paths)
self.userdata_raw = ec2_utils.get_instance_userdata(self.api_ver,
- md_addr,
- ssl_details)
+ md_addr)
self.metadata = ec2_utils.get_instance_metadata(self.api_ver,
- md_addr,
- ssl_details)
+ md_addr)
LOG.debug("Crawl of metadata service took %s seconds",
int(time.time() - start_time))
return True