Diffstat (limited to 'cloudinit/url_helper.py')
-rw-r--r--  cloudinit/url_helper.py | 51 +++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 41 insertions(+), 10 deletions(-)
diff --git a/cloudinit/url_helper.py b/cloudinit/url_helper.py
index 5c33d1e4..76a8e29b 100644
--- a/cloudinit/url_helper.py
+++ b/cloudinit/url_helper.py
@@ -20,6 +20,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import httplib
import time
import urllib
@@ -33,6 +34,8 @@ from cloudinit import version
LOG = logging.getLogger(__name__)
+NOT_FOUND = httplib.NOT_FOUND
+
# Check if requests has ssl support (added in requests >= 0.8.8)
SSL_ENABLED = False
CONFIG_ENABLED = False # This was added in 0.7 (but taken out in >=1.0)
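
A minimal sketch of how the new NOT_FOUND constant might be consumed by a
caller (the metadata URL here is hypothetical, and check_status=False is
assumed so a 404 comes back as a response rather than raising):

    from cloudinit.url_helper import NOT_FOUND, readurl

    resp = readurl("http://169.254.169.254/latest/meta-data/",
                   check_status=False)
    if resp.code == NOT_FOUND:
        # A 404 means the key is absent, not that the transport failed.
        pass
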
@@ -76,6 +79,31 @@ def combine_url(base, *add_ons):
    return url
+# Made to have same accessors as UrlResponse so that the
+# read_file_or_url can return this or that object and the
+# 'user' of those objects will not need to know the difference.
+class StringResponse(object):
+    def __init__(self, contents, code=200):
+        self.code = code
+        self.headers = {}
+        self.contents = contents
+        self.url = None
+
+    def ok(self, *args, **kwargs):  # pylint: disable=W0613
+        if self.code != 200:
+            return False
+        return True
+
+    def __str__(self):
+        return self.contents
+
+
+class FileResponse(StringResponse):
+    def __init__(self, path, contents, code=200):
+        StringResponse.__init__(self, contents, code=code)
+        self.url = path
+
+
class UrlResponse(object):
    def __init__(self, response):
        self._response = response
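
A short sketch of the duck-typing these classes enable: any consumer written
against UrlResponse's accessors (.code, .ok(), .contents, .url, str()) can
also handle the file-backed results that read_file_or_url in cloudinit.util
is assumed to return:

    resp = FileResponse("/etc/hostname", "myhost\n")
    if resp.ok():               # True, since code defaults to 200
        print(str(resp))        # the raw contents, "myhost\n"
    print(resp.url)             # the originating path, "/etc/hostname"
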
@@ -146,17 +174,19 @@ def existsurl(url, ssl_details=None, timeout=None):
def readurl(url, data=None, timeout=None, retries=0, sec_between=1,
            headers=None, headers_cb=None, ssl_details=None,
-           check_status=True, allow_redirects=True):
+           check_status=True, allow_redirects=True, exception_cb=None):
    return _readurl(url, data=data, timeout=timeout, retries=retries,
                    sec_between=sec_between, headers=headers,
                    headers_cb=headers_cb, ssl_details=ssl_details,
                    check_status=check_status,
-                   allow_redirects=allow_redirects)
+                   allow_redirects=allow_redirects,
+                   exception_cb=exception_cb)
def _readurl(url, data=None, timeout=None, retries=0, sec_between=1,
             headers=None, headers_cb=None, ssl_details=None,
-            check_status=True, allow_redirects=True, method='GET'):
+            check_status=True, allow_redirects=True, exception_cb=None,
+            method='GET'):
    url = _cleanurl(url)
    req_args = {
        'url': url,
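
A hedged sketch of an exception_cb caller; the callback name is hypothetical,
and it assumes the exception handed to the callback may carry a 'code'
attribute when the server answered with an HTTP error (getattr falls back to
None otherwise):

    def _retry_on_404(req_args, excp):
        # Returning False aborts the retry loop early; keep retrying
        # only while the server keeps answering 404 (NOT_FOUND).
        return getattr(excp, 'code', None) == NOT_FOUND

    resp = readurl("http://169.254.169.254/2009-04-04/meta-data/",
                   retries=5, exception_cb=_retry_on_404)
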
@@ -203,14 +233,13 @@ def _readurl(url, data=None, timeout=None, retries=0, sec_between=1,
    # Handle retrying ourselves since the built-in support
    # doesn't handle sleeping between tries...
    for i in range(0, manual_tries):
+        req_args['headers'] = headers_cb(url)
+        filtered_req_args = {}
+        for (k, v) in req_args.items():
+            if k == 'data':
+                continue
+            filtered_req_args[k] = v
        try:
-            req_args['headers'] = headers_cb(url)
-            filtered_req_args = {}
-            for (k, v) in req_args.items():
-                if k == 'data':
-                    continue
-                filtered_req_args[k] = v
-
            LOG.debug("[%s/%s] open '%s' with %s configuration", i,
                      manual_tries, url, filtered_req_args)
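
A minimal illustration of why 'data' is dropped before logging (the dict
literal is hypothetical): only the filtered copy reaches LOG.debug, so a
large or sensitive request body never lands in the debug log.

    req_args = {'url': url, 'data': big_payload, 'timeout': 5}
    filtered_req_args = dict((k, v) for (k, v) in req_args.items()
                             if k != 'data')
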
@@ -236,6 +265,8 @@ def _readurl(url, data=None, timeout=None, retries=0, sec_between=1,
                    # ssl exceptions are not going to get fixed by waiting a
                    # few seconds
                    break
+            if exception_cb and not exception_cb(req_args.copy(), excps[-1]):
+                break
            if i + 1 < manual_tries and sec_between > 0:
                LOG.debug("Please wait %s seconds while we wait to try again",
                          sec_between)
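
A sketch of the retry semantics this loop implements, assuming manual_tries
is derived as retries + 1 elsewhere in this function:

    # Up to 4 attempts in total, sleeping 2 seconds between failed
    # attempts (but not after the final one).
    resp = readurl("http://example.com/meta-data", retries=3, sec_between=2)
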