25 files changed, 879 insertions, 298 deletions
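
The largest change in this patch routes VM status reports through the HostGAPlugin /status channel: the agent wraps the storage PUT in a JSON envelope carrying the SAS URL, the blob headers, and the base64-encoded status body. A minimal sketch of that envelope shape, mirroring HostPluginProtocol._build_status_data below; the helper name and the example URL/values are hypothetical, not part of the patch:

    import base64
    import json

    def build_status_payload(sas_url, blob_headers, content=None):
        # Headers travel as a list of {headerName, headerValue} pairs and the
        # status body is base64-encoded, matching the envelope built below.
        headers = [{'headerName': name, 'headerValue': value}
                   for name, value in blob_headers.items()]
        data = {'requestUri': sas_url, 'headers': headers}
        if content is not None:
            data['content'] = base64.b64encode(bytes(content)).decode('utf-8')
        return json.dumps(data, sort_keys=True)

    # Hypothetical example values:
    payload = build_status_payload(
        "http://example.blob.core.windows.net/vhds/status?sig=abc",
        {"x-ms-blob-type": "BlockBlob", "x-ms-version": "2014-02-14"},
        content=b'{"status": "Ready"}')
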
diff --git a/azurelinuxagent/common/dhcp.py b/azurelinuxagent/common/dhcp.py index d5c90cb..66346b5 100644 --- a/azurelinuxagent/common/dhcp.py +++ b/azurelinuxagent/common/dhcp.py @@ -84,7 +84,7 @@ class DhcpHandler(object): :return: True if a route to KNOWN_WIRESERVER_IP exists. """ route_exists = False - logger.info("test for route to {0}".format(KNOWN_WIRESERVER_IP)) + logger.info("Test for route to {0}".format(KNOWN_WIRESERVER_IP)) try: route_file = '/proc/net/route' if os.path.exists(route_file) and \ @@ -95,13 +95,12 @@ class DhcpHandler(object): self.gateway = None self.routes = None route_exists = True - logger.info("route to {0} exists".format(KNOWN_WIRESERVER_IP)) + logger.info("Route to {0} exists".format(KNOWN_WIRESERVER_IP)) else: - logger.warn( - "no route exists to {0}".format(KNOWN_WIRESERVER_IP)) + logger.warn("No route exists to {0}".format(KNOWN_WIRESERVER_IP)) except Exception as e: logger.error( - "could not determine whether route exists to {0}: {1}".format( + "Could not determine whether route exists to {0}: {1}".format( KNOWN_WIRESERVER_IP, e)) return route_exists @@ -118,12 +117,12 @@ class DhcpHandler(object): exists = False - logger.info("checking for dhcp lease cache") + logger.info("Checking for dhcp lease cache") cached_endpoint = self.osutil.get_dhcp_lease_endpoint() if cached_endpoint is not None: self.endpoint = cached_endpoint exists = True - logger.info("cache exists [{0}]".format(exists)) + logger.info("Cache exists [{0}]".format(exists)) return exists def conf_routes(self): diff --git a/azurelinuxagent/common/event.py b/azurelinuxagent/common/event.py index 9265820..ce79adf 100644 --- a/azurelinuxagent/common/event.py +++ b/azurelinuxagent/common/event.py @@ -46,6 +46,7 @@ class WALAEventOperation: Install = "Install" InitializeHostPlugin = "InitializeHostPlugin" Provision = "Provision" + ReportStatus = "ReportStatus" Restart = "Restart" UnhandledError = "UnhandledError" UnInstall = "UnInstall" @@ -65,8 +66,17 @@ class EventLogger(object): if not os.path.exists(self.event_dir): os.mkdir(self.event_dir) os.chmod(self.event_dir, 0o700) - if len(os.listdir(self.event_dir)) > 1000: - raise EventError("Too many files under: {0}".format(self.event_dir)) + + existing_events = os.listdir(self.event_dir) + if len(existing_events) >= 1000: + existing_events.sort() + oldest_files = existing_events[:-999] + logger.warn("Too many files under: {0}, removing oldest".format(self.event_dir)) + try: + for f in oldest_files: + os.remove(os.path.join(self.event_dir, f)) + except IOError as e: + raise EventError(e) filename = os.path.join(self.event_dir, ustr(int(time.time() * 1000000))) @@ -101,6 +111,15 @@ class EventLogger(object): __event_logger__ = EventLogger() +def report_event(op, is_success=True, message=''): + from azurelinuxagent.common.version import AGENT_NAME, CURRENT_VERSION + add_event(AGENT_NAME, + version=CURRENT_VERSION, + is_success=is_success, + message=message, + op=op) + + def add_event(name, op="", is_success=True, duration=0, version=CURRENT_VERSION, message="", evt_type="", is_internal=False, reporter=__event_logger__): diff --git a/azurelinuxagent/common/osutil/default.py b/azurelinuxagent/common/osutil/default.py index 4cd379b..59d5985 100644 --- a/azurelinuxagent/common/osutil/default.py +++ b/azurelinuxagent/common/osutil/default.py @@ -18,6 +18,7 @@ import multiprocessing import os +import platform import re import shutil import socket @@ -420,7 +421,12 @@ class DefaultOSUtil(object): """ iface='' expected=16 # how many devices should I 
expect... - struct_size=40 # for 64bit the size is 40 bytes + + # for 64bit the size is 40 bytes + # for 32bit the size is 32 bytes + python_arc = platform.architecture()[0] + struct_size = 32 if python_arc == '32bit' else 40 + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) @@ -440,11 +446,11 @@ class DefaultOSUtil(object): if len(iface) == 0 or self.is_loopback(iface) or iface != primary: # test the next one if len(iface) != 0 and not self.disable_route_warning: - logger.info('interface [{0}] skipped'.format(iface)) + logger.info('Interface [{0}] skipped'.format(iface)) continue else: # use this one - logger.info('interface [{0}] selected'.format(iface)) + logger.info('Interface [{0}] selected'.format(iface)) break return iface.decode('latin-1'), socket.inet_ntoa(sock[i+20:i+24]) @@ -473,7 +479,7 @@ class DefaultOSUtil(object): primary_metric = None if not self.disable_route_warning: - logger.info("examine /proc/net/route for primary interface") + logger.info("Examine /proc/net/route for primary interface") with open('/proc/net/route') as routing_table: idx = 0 for header in filter(lambda h: len(h) > 0, routing_table.readline().strip(" \n").split("\t")): @@ -500,12 +506,13 @@ class DefaultOSUtil(object): if not self.disable_route_warning: with open('/proc/net/route') as routing_table_fh: routing_table_text = routing_table_fh.read() - logger.error('could not determine primary interface, ' - 'please ensure /proc/net/route is correct:\n' - '{0}'.format(routing_table_text)) + logger.warn('Could not determine primary interface, ' + 'please ensure /proc/net/route is correct') + logger.warn('Contents of /proc/net/route:\n{0}'.format(routing_table_text)) + logger.warn('Primary interface examination will retry silently') self.disable_route_warning = True else: - logger.info('primary interface is [{0}]'.format(primary)) + logger.info('Primary interface is [{0}]'.format(primary)) self.disable_route_warning = False return primary diff --git a/azurelinuxagent/common/osutil/factory.py b/azurelinuxagent/common/osutil/factory.py index acd7f6e..eee9f97 100644 --- a/azurelinuxagent/common/osutil/factory.py +++ b/azurelinuxagent/common/osutil/factory.py @@ -76,6 +76,9 @@ def get_osutil(distro_name=DISTRO_NAME, else: return RedhatOSUtil() + elif distro_name == "euleros": + return RedhatOSUtil() + elif distro_name == "freebsd": return FreeBSDOSUtil() diff --git a/azurelinuxagent/common/protocol/hostplugin.py b/azurelinuxagent/common/protocol/hostplugin.py index bdae56e..70bf8b4 100644 --- a/azurelinuxagent/common/protocol/hostplugin.py +++ b/azurelinuxagent/common/protocol/hostplugin.py @@ -16,8 +16,17 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -from azurelinuxagent.common.protocol.wire import * + +import base64 +import json + +from azurelinuxagent.common import logger +from azurelinuxagent.common.exception import ProtocolError, HttpError +from azurelinuxagent.common.future import ustr, httpclient +from azurelinuxagent.common.utils import restutil from azurelinuxagent.common.utils import textutil +from azurelinuxagent.common.utils.textutil import remove_bom +from azurelinuxagent.common.version import PY_VERSION_MAJOR HOST_PLUGIN_PORT = 32526 URI_FORMAT_GET_API_VERSIONS = "http://{0}:{1}/versions" @@ -30,12 +39,15 @@ HEADER_VERSION = "x-ms-version" HEADER_HOST_CONFIG_NAME = "x-ms-host-config-name" HEADER_ARTIFACT_LOCATION = "x-ms-artifact-location" HEADER_ARTIFACT_MANIFEST_LOCATION = "x-ms-artifact-manifest-location" +MAXIMUM_PAGEBLOB_PAGE_SIZE = 4 * 1024 * 1024 # Max page 
size: 4MB class HostPluginProtocol(object): + _is_default_channel = False + def __init__(self, endpoint, container_id, role_config_name): if endpoint is None: - raise ProtocolError("Host plugin endpoint not provided") + raise ProtocolError("HostGAPlugin: Endpoint not provided") self.is_initialized = False self.is_available = False self.api_versions = None @@ -45,45 +57,51 @@ class HostPluginProtocol(object): self.role_config_name = role_config_name self.manifest_uri = None + @staticmethod + def is_default_channel(): + return HostPluginProtocol._is_default_channel + + @staticmethod + def set_default_channel(is_default): + HostPluginProtocol._is_default_channel = is_default + def ensure_initialized(self): if not self.is_initialized: self.api_versions = self.get_api_versions() self.is_available = API_VERSION in self.api_versions self.is_initialized = True - - from azurelinuxagent.common.event import add_event, WALAEventOperation - add_event(name="WALA", - op=WALAEventOperation.InitializeHostPlugin, - is_success=self.is_available) + from azurelinuxagent.common.event import WALAEventOperation, report_event + report_event(WALAEventOperation.InitializeHostPlugin, + is_success=self.is_available) return self.is_available def get_api_versions(self): url = URI_FORMAT_GET_API_VERSIONS.format(self.endpoint, HOST_PLUGIN_PORT) - logger.verbose("getting API versions at [{0}]".format(url)) + logger.verbose("HostGAPlugin: Getting API versions at [{0}]".format( + url)) return_val = [] try: headers = {HEADER_CONTAINER_ID: self.container_id} response = restutil.http_get(url, headers) if response.status != httpclient.OK: logger.error( - "get API versions returned status code [{0}]".format( - response.status)) + "HostGAPlugin: Failed Get API versions: {0}".format( + self.read_response_error(response))) else: return_val = ustr(remove_bom(response.read()), encoding='utf-8') except HttpError as e: - logger.error("get API versions failed with [{0}]".format(e)) + logger.error("HostGAPlugin: Exception Get API versions: {0}".format(e)) return return_val def get_artifact_request(self, artifact_url, artifact_manifest_url=None): if not self.ensure_initialized(): - logger.error("host plugin channel is not available") - return None, None + raise ProtocolError("HostGAPlugin: Host plugin channel is not available") + if textutil.is_str_none_or_whitespace(artifact_url): - logger.error("no extension artifact url was provided") - return None, None + raise ProtocolError("HostGAPlugin: No extension artifact url was provided") url = URI_FORMAT_GET_EXTENSION_ARTIFACT.format(self.endpoint, HOST_PLUGIN_PORT) @@ -97,45 +115,6 @@ class HostPluginProtocol(object): return url, headers - def put_vm_status(self, status_blob, sas_url, config_blob_type=None): - """ - Try to upload the VM status via the host plugin /status channel - :param sas_url: the blob SAS url to pass to the host plugin - :param config_blob_type: the blob type from the extension config - :type status_blob: StatusBlob - """ - if not self.ensure_initialized(): - logger.error("host plugin channel is not available") - return - if status_blob is None or status_blob.vm_status is None: - logger.error("no status data was provided") - return - try: - url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT) - logger.verbose("Posting VM status to host plugin") - status = textutil.b64encode(status_blob.data) - blob_type = status_blob.type if status_blob.type else config_blob_type - headers = {HEADER_VERSION: API_VERSION, - "Content-type": "application/json", - 
HEADER_CONTAINER_ID: self.container_id, - HEADER_HOST_CONFIG_NAME: self.role_config_name} - blob_headers = [{'headerName': 'x-ms-version', - 'headerValue': status_blob.__storage_version__}, - {'headerName': 'x-ms-blob-type', - 'headerValue': blob_type}] - data = json.dumps({'requestUri': sas_url, 'headers': blob_headers, - 'content': status}, sort_keys=True) - response = restutil.http_put(url, data=data, headers=headers) - if response.status != httpclient.OK: - logger.warn("PUT {0} [{1}: {2}]", - url, - response.status, - response.reason) - else: - logger.verbose("Successfully uploaded status to host plugin") - except Exception as e: - logger.error("Put VM status failed [{0}]", e) - def put_vm_log(self, content): """ Try to upload the given content to the host plugin @@ -147,13 +126,13 @@ class HostPluginProtocol(object): :return: """ if not self.ensure_initialized(): - logger.error("host plugin channel is not available") - return + raise ProtocolError("HostGAPlugin: Host plugin channel is not available") + if content is None \ or self.container_id is None \ or self.deployment_id is None: logger.error( - "invalid arguments passed: " + "HostGAPlugin: Invalid arguments passed: " "[{0}], [{1}], [{2}]".format( content, self.container_id, @@ -163,11 +142,158 @@ class HostPluginProtocol(object): headers = {"x-ms-vmagentlog-deploymentid": self.deployment_id, "x-ms-vmagentlog-containerid": self.container_id} - logger.info("put VM log at [{0}]".format(url)) + logger.info("HostGAPlugin: Put VM log to [{0}]".format(url)) try: response = restutil.http_put(url, content, headers) if response.status != httpclient.OK: - logger.error("put log returned status code [{0}]".format( + logger.error("HostGAPlugin: Put log failed: Code {0}".format( response.status)) except HttpError as e: - logger.error("put log failed with [{0}]".format(e)) + logger.error("HostGAPlugin: Put log exception: {0}".format(e)) + + def put_vm_status(self, status_blob, sas_url, config_blob_type=None): + """ + Try to upload the VM status via the host plugin /status channel + :param sas_url: the blob SAS url to pass to the host plugin + :param config_blob_type: the blob type from the extension config + :type status_blob: StatusBlob + """ + if not self.ensure_initialized(): + raise ProtocolError("HostGAPlugin: HostGAPlugin is not available") + + if status_blob is None or status_blob.vm_status is None: + raise ProtocolError("HostGAPlugin: Status blob was not provided") + + logger.verbose("HostGAPlugin: Posting VM status") + try: + blob_type = status_blob.type if status_blob.type else config_blob_type + + if blob_type == "BlockBlob": + self._put_block_blob_status(sas_url, status_blob) + else: + self._put_page_blob_status(sas_url, status_blob) + + if not HostPluginProtocol.is_default_channel(): + logger.info("HostGAPlugin: Setting host plugin as default channel") + HostPluginProtocol.set_default_channel(True) + except Exception as e: + message = "HostGAPlugin: Exception Put VM status: {0}".format(e) + logger.error(message) + from azurelinuxagent.common.event import WALAEventOperation, report_event + report_event(op=WALAEventOperation.ReportStatus, + is_success=False, + message=message) + logger.warn("HostGAPlugin: resetting default channel") + HostPluginProtocol.set_default_channel(False) + + def _put_block_blob_status(self, sas_url, status_blob): + url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT) + + response = restutil.http_put(url, + data=self._build_status_data( + sas_url, + 
status_blob.get_block_blob_headers(len(status_blob.data)), + bytearray(status_blob.data, encoding='utf-8')), + headers=self._build_status_headers()) + + if response.status != httpclient.OK: + raise HttpError("HostGAPlugin: Put BlockBlob failed: {0}".format( + self.read_response_error(response))) + else: + logger.verbose("HostGAPlugin: Put BlockBlob status succeeded") + + def _put_page_blob_status(self, sas_url, status_blob): + url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT) + + # Convert the status into a blank-padded string whose length is modulo 512 + status = bytearray(status_blob.data, encoding='utf-8') + status_size = int((len(status) + 511) / 512) * 512 + status = bytearray(status_blob.data.ljust(status_size), encoding='utf-8') + + # First, initialize an empty blob + response = restutil.http_put(url, + data=self._build_status_data( + sas_url, + status_blob.get_page_blob_create_headers(status_size)), + headers=self._build_status_headers()) + + if response.status != httpclient.OK: + raise HttpError( + "HostGAPlugin: Failed PageBlob clean-up: {0}".format( + self.read_response_error(response))) + else: + logger.verbose("HostGAPlugin: PageBlob clean-up succeeded") + + # Then, upload the blob in pages + if sas_url.count("?") <= 0: + sas_url = "{0}?comp=page".format(sas_url) + else: + sas_url = "{0}&comp=page".format(sas_url) + + start = 0 + end = 0 + while start < len(status): + # Create the next page + end = start + min(len(status) - start, MAXIMUM_PAGEBLOB_PAGE_SIZE) + page_size = int((end - start + 511) / 512) * 512 + buf = bytearray(page_size) + buf[0: end - start] = status[start: end] + + # Send the page + response = restutil.http_put(url, + data=self._build_status_data( + sas_url, + status_blob.get_page_blob_page_headers(start, end), + buf), + headers=self._build_status_headers()) + + if response.status != httpclient.OK: + raise HttpError( + "HostGAPlugin Error: Put PageBlob bytes [{0},{1}]: " \ + "{2}".format( + start, end, self.read_response_error(response))) + + # Advance to the next page (if any) + start = end + + def _build_status_data(self, sas_url, blob_headers, content=None): + headers = [] + for name in iter(blob_headers.keys()): + headers.append({ + 'headerName': name, + 'headerValue': blob_headers[name] + }) + + data = { + 'requestUri': sas_url, + 'headers': headers + } + if not content is None: + data['content'] = self._base64_encode(content) + return json.dumps(data, sort_keys=True) + + def _build_status_headers(self): + return { + HEADER_VERSION: API_VERSION, + "Content-type": "application/json", + HEADER_CONTAINER_ID: self.container_id, + HEADER_HOST_CONFIG_NAME: self.role_config_name + } + + def _base64_encode(self, data): + s = base64.b64encode(bytes(data)) + if PY_VERSION_MAJOR > 2: + return s.decode('utf-8') + return s + + @staticmethod + def read_response_error(response): + if response is None: + return '' + body = remove_bom(response.read()) + if PY_VERSION_MAJOR < 3 and body is not None: + body = ustr(body, encoding='utf-8') + return "{0}, {1}, {2}".format( + response.status, + response.reason, + body) diff --git a/azurelinuxagent/common/protocol/wire.py b/azurelinuxagent/common/protocol/wire.py index 71c3e37..265e2dd 100644 --- a/azurelinuxagent/common/protocol/wire.py +++ b/azurelinuxagent/common/protocol/wire.py @@ -370,11 +370,14 @@ class StatusBlob(object): __storage_version__ = "2014-02-14" + def prepare(self, blob_type): + logger.verbose("Prepare status blob") + self.data = self.to_json() + self.type = blob_type + def upload(self, 
url): # TODO upload extension only if content has changed - logger.verbose("Upload status blob") upload_successful = False - self.data = self.to_json() self.type = self.get_blob_type(url) try: if self.type == "BlockBlob": @@ -384,7 +387,12 @@ class StatusBlob(object): else: raise ProtocolError("Unknown blob type: {0}".format(self.type)) except HttpError as e: - logger.warn("Initial upload failed [{0}]".format(e)) + message = "Initial upload failed [{0}]".format(e) + logger.warn(message) + from azurelinuxagent.common.event import WALAEventOperation, report_event + report_event(op=WALAEventOperation.ReportStatus, + is_success=False, + message=message) else: logger.verbose("Uploading status blob succeeded") upload_successful = True @@ -411,48 +419,54 @@ class StatusBlob(object): logger.verbose("Blob type: [{0}]", blob_type) return blob_type + def get_block_blob_headers(self, blob_size): + return { + "Content-Length": ustr(blob_size), + "x-ms-blob-type": "BlockBlob", + "x-ms-date": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), + "x-ms-version": self.__class__.__storage_version__ + } + def put_block_blob(self, url, data): logger.verbose("Put block blob") - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) - resp = self.client.call_storage_service( - restutil.http_put, - url, - data, - { - "x-ms-date": timestamp, - "x-ms-blob-type": "BlockBlob", - "Content-Length": ustr(len(data)), - "x-ms-version": self.__class__.__storage_version__ - }) + headers = self.get_block_blob_headers(len(data)) + resp = self.client.call_storage_service(restutil.http_put, url, data, headers) if resp.status != httpclient.CREATED: raise UploadError( "Failed to upload block blob: {0}".format(resp.status)) + def get_page_blob_create_headers(self, blob_size): + return { + "Content-Length": "0", + "x-ms-blob-content-length": ustr(blob_size), + "x-ms-blob-type": "PageBlob", + "x-ms-date": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), + "x-ms-version": self.__class__.__storage_version__ + } + + def get_page_blob_page_headers(self, start, end): + return { + "Content-Length": ustr(end - start), + "x-ms-date": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()), + "x-ms-range": "bytes={0}-{1}".format(start, end - 1), + "x-ms-page-write": "update", + "x-ms-version": self.__class__.__storage_version__ + } + def put_page_blob(self, url, data): logger.verbose("Put page blob") - # Convert string into bytes + # Convert string into bytes and align to 512 bytes data = bytearray(data, encoding='utf-8') - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) - - # Align to 512 bytes page_blob_size = int((len(data) + 511) / 512) * 512 - resp = self.client.call_storage_service( - restutil.http_put, - url, - "", - { - "x-ms-date": timestamp, - "x-ms-blob-type": "PageBlob", - "Content-Length": "0", - "x-ms-blob-content-length": ustr(page_blob_size), - "x-ms-version": self.__class__.__storage_version__ - }) + + headers = self.get_page_blob_create_headers(page_blob_size) + resp = self.client.call_storage_service(restutil.http_put, url, "", headers) if resp.status != httpclient.CREATED: raise UploadError( "Failed to clean up page blob: {0}".format(resp.status)) - if url.count("?") < 0: + if url.count("?") <= 0: url = "{0}?comp=page".format(url) else: url = "{0}&comp=page".format(url) @@ -469,17 +483,12 @@ class StatusBlob(object): buf_size = page_end - start buf = bytearray(buf_size) buf[0: content_size] = data[start: end] + headers = self.get_page_blob_page_headers(start, page_end) resp = 
self.client.call_storage_service( restutil.http_put, url, bytebuffer(buf), - { - "x-ms-date": timestamp, - "x-ms-range": "bytes={0}-{1}".format(start, page_end - 1), - "x-ms-page-write": "update", - "x-ms-version": self.__class__.__storage_version__, - "Content-Length": ustr(page_end - start) - }) + headers) if resp is None or resp.status != httpclient.CREATED: raise UploadError( "Failed to upload page blob: {0}".format(resp.status)) @@ -634,9 +643,14 @@ class WireClient(object): def fetch_manifest(self, version_uris): logger.verbose("Fetch manifest") for version in version_uris: - response = self.fetch(version.uri) + response = None + if not HostPluginProtocol.is_default_channel(): + response = self.fetch(version.uri) if not response: - logger.verbose("Manifest could not be downloaded, falling back to host plugin") + if HostPluginProtocol.is_default_channel(): + logger.verbose("Using host plugin as default channel") + else: + logger.verbose("Manifest could not be downloaded, falling back to host plugin") host = self.get_host_plugin() uri, headers = host.get_artifact_request(version.uri) response = self.fetch(uri, headers) @@ -648,6 +662,9 @@ class WireClient(object): else: host.manifest_uri = version.uri logger.verbose("Manifest downloaded successfully from host plugin") + if not HostPluginProtocol.is_default_channel(): + logger.info("Setting host plugin as default channel") + HostPluginProtocol.set_default_channel(True) if response: return response raise ProtocolError("Failed to fetch manifest from all sources") @@ -663,12 +680,11 @@ class WireClient(object): if resp.status == httpclient.OK: return_value = self.decode_config(resp.read()) else: - logger.warn("Could not fetch {0} [{1}: {2}]", + logger.warn("Could not fetch {0} [{1}]", uri, - resp.status, - resp.reason) + HostPluginProtocol.read_response_error(resp)) except (HttpError, ProtocolError) as e: - logger.verbose("Fetch failed from [{0}]", uri) + logger.verbose("Fetch failed from [{0}]: {1}", uri, e) return return_value def update_hosting_env(self, goal_state): @@ -839,10 +855,12 @@ class WireClient(object): if ext_conf.status_upload_blob is not None: uploaded = False try: - uploaded = self.status_blob.upload(ext_conf.status_upload_blob) - self.report_blob_type(self.status_blob.type, - ext_conf.status_upload_blob_type) - except (HttpError, ProtocolError) as e: + self.status_blob.prepare(ext_conf.status_upload_blob_type) + if not HostPluginProtocol.is_default_channel(): + uploaded = self.status_blob.upload(ext_conf.status_upload_blob) + self.report_blob_type(self.status_blob.type, + ext_conf.status_upload_blob_type) + except (HttpError, ProtocolError): # errors have already been logged pass if not uploaded: diff --git a/azurelinuxagent/common/utils/restutil.py b/azurelinuxagent/common/utils/restutil.py index 7197370..49d2d68 100644 --- a/azurelinuxagent/common/utils/restutil.py +++ b/azurelinuxagent/common/utils/restutil.py @@ -28,7 +28,7 @@ from azurelinuxagent.common.future import httpclient, urlparse REST api util functions """ -RETRY_WAITING_INTERVAL = 10 +RETRY_WAITING_INTERVAL = 3 secure_warning = True diff --git a/azurelinuxagent/common/version.py b/azurelinuxagent/common/version.py index 30b751c..8a81974 100644 --- a/azurelinuxagent/common/version.py +++ b/azurelinuxagent/common/version.py @@ -76,6 +76,9 @@ def get_distro(): osinfo[2] = "oracle" osinfo[3] = "Oracle Linux" + if os.path.exists("/etc/euleros-release"): + osinfo[0] = "euleros" + # The platform.py lib has issue with detecting BIG-IP linux distribution. 
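
Both StatusBlob.put_page_blob and HostPluginProtocol._put_page_blob_status above depend on the same arithmetic: the status payload is padded to a multiple of 512 bytes (the page-blob page size) and then written in ranges no larger than MAXIMUM_PAGEBLOB_PAGE_SIZE. A minimal sketch of that padding and chunking, with hypothetical helper names (align_to_page, page_ranges) outside the agent's classes:

    MAXIMUM_PAGEBLOB_PAGE_SIZE = 4 * 1024 * 1024  # 4MB, as defined in hostplugin.py

    def align_to_page(length, page=512):
        # Round up to the next multiple of the 512-byte page size.
        return int((length + page - 1) / page) * page

    def page_ranges(total_size):
        # Yield (start, end) byte ranges capped at the 4MB page limit.
        start = 0
        while start < total_size:
            end = start + min(total_size - start, MAXIMUM_PAGEBLOB_PAGE_SIZE)
            yield (start, end)
            start = end

    # Example: a 1300-byte status body pads to 1536 bytes (three 512-byte pages)
    # and is uploaded as the single range (0, 1536).
    assert align_to_page(1300) == 1536
    assert list(page_ranges(align_to_page(1300))) == [(0, 1536)]
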
# Merge the following patch provided by F5. if os.path.exists("/shared/vadc"): @@ -88,7 +91,7 @@ def get_distro(): AGENT_NAME = "WALinuxAgent" AGENT_LONG_NAME = "Azure Linux Agent" -AGENT_VERSION = '2.2.6' +AGENT_VERSION = '2.2.9' AGENT_LONG_VERSION = "{0}-{1}".format(AGENT_NAME, AGENT_VERSION) AGENT_DESCRIPTION = """\ The Azure Linux Agent supports the provisioning and running of Linux diff --git a/azurelinuxagent/daemon/resourcedisk/default.py b/azurelinuxagent/daemon/resourcedisk/default.py index 21de38f..2b116fb 100644 --- a/azurelinuxagent/daemon/resourcedisk/default.py +++ b/azurelinuxagent/daemon/resourcedisk/default.py @@ -85,6 +85,11 @@ class ResourceDiskHandler(object): except ResourceDiskError as e: logger.error("Failed to enable swap {0}", e) + def reread_partition_table(self, device): + if shellutil.run("sfdisk -R {0}".format(device), chk_err=False): + shellutil.run("blockdev --rereadpt {0}".format(device), + chk_err=False) + def mount_resource_disk(self, mount_point): device = self.osutil.device_for_ide_port(1) if device is None: @@ -138,12 +143,13 @@ class ResourceDiskHandler(object): shellutil.run(mkfs_string) else: logger.info("GPT not detected, determining filesystem") - ret = self.change_partition_type(suppress_message=True, option_str="{0} 1".format(device)) + ret = self.change_partition_type(suppress_message=True, option_str="{0} 1 -n".format(device)) ptype = ret[1].strip() if ptype == "7" and self.fs != "ntfs": logger.info("The partition is formatted with ntfs, updating " "partition type to 83") self.change_partition_type(suppress_message=False, option_str="{0} 1 83".format(device)) + self.reread_partition_table(device) logger.info("Format partition [{0}]", mkfs_string) shellutil.run(mkfs_string) else: @@ -166,8 +172,8 @@ class ResourceDiskHandler(object): logger.info("Mount resource disk [{0}]", mount_string) ret, output = shellutil.run_get_output(mount_string, chk_err=False) - # if the exit code is 32, then the resource disk is already mounted - if ret == 32: + # if the exit code is 32, then the resource disk can be already mounted + if ret == 32 and output.find("is already mounted") != -1: logger.warn("Could not mount resource disk: {0}", output) elif ret != 0: # Some kernels seem to issue an async partition re-read after a @@ -178,9 +184,7 @@ class ResourceDiskHandler(object): logger.warn("Failed to mount resource disk. 
" "Retry mounting after re-reading partition info.") - if shellutil.run("sfdisk -R {0}".format(device), chk_err=False): - shellutil.run("blockdev --rereadpt {0}".format(device), - chk_err=False) + self.reread_partition_table(device) ret, output = shellutil.run_get_output(mount_string) if ret: diff --git a/azurelinuxagent/ga/exthandlers.py b/azurelinuxagent/ga/exthandlers.py index 9b99d04..e0125aa 100644 --- a/azurelinuxagent/ga/exthandlers.py +++ b/azurelinuxagent/ga/exthandlers.py @@ -149,14 +149,17 @@ def migrate_handler_state(): logger.warn("Exception occurred removing {0}: {1}", handler_state_path, str(e)) return + class ExtHandlerState(object): NotInstalled = "NotInstalled" Installed = "Installed" Enabled = "Enabled" + def get_exthandlers_handler(): return ExtHandlersHandler() + class ExtHandlersHandler(object): def __init__(self): self.protocol_util = get_protocol_util() @@ -222,8 +225,8 @@ class ExtHandlersHandler(object): self.log_etag = True state = ext_handler.properties.state - ext_handler_i.logger.info("Expected handler state: {0}", state) - if state == "enabled": + ext_handler_i.logger.info("Target handler state: {0}", state) + if state == u"enabled": self.handle_enable(ext_handler_i) elif state == u"disabled": self.handle_disable(ext_handler_i) @@ -237,21 +240,17 @@ class ExtHandlersHandler(object): ext_handler_i.report_event(message=ustr(e), is_success=False) def handle_enable(self, ext_handler_i): - old_ext_handler_i = ext_handler_i.get_installed_ext_handler() if old_ext_handler_i is not None and \ old_ext_handler_i.version_gt(ext_handler_i): raise ExtensionError(u"Downgrade not allowed") - handler_state = ext_handler_i.get_handler_state() - ext_handler_i.logger.info("Current handler state is: {0}", handler_state) + ext_handler_i.logger.info("[Enable] current handler state is: {0}", + handler_state.lower()) if handler_state == ExtHandlerState.NotInstalled: ext_handler_i.set_handler_state(ExtHandlerState.NotInstalled) - ext_handler_i.download() - ext_handler_i.update_settings() - if old_ext_handler_i is None: ext_handler_i.install() elif ext_handler_i.version_gt(old_ext_handler_i): @@ -268,13 +267,15 @@ class ExtHandlersHandler(object): def handle_disable(self, ext_handler_i): handler_state = ext_handler_i.get_handler_state() - ext_handler_i.logger.info("Current handler state is: {0}", handler_state) + ext_handler_i.logger.info("[Disable] current handler state is: {0}", + handler_state.lower()) if handler_state == ExtHandlerState.Enabled: ext_handler_i.disable() def handle_uninstall(self, ext_handler_i): handler_state = ext_handler_i.get_handler_state() - ext_handler_i.logger.info("Current handler state is: {0}", handler_state) + ext_handler_i.logger.info("[Uninstall] current handler state is: {0}", + handler_state.lower()) if handler_state != ExtHandlerState.NotInstalled: if handler_state == ExtHandlerState.Enabled: ext_handler_i.disable() @@ -299,7 +300,7 @@ class ExtHandlersHandler(object): try: self.protocol.report_vm_status(vm_status) if self.log_report: - logger.verbose("Successfully reported vm agent status") + logger.verbose("Completed vm agent status report") except ProtocolError as e: message = "Failed to report vm agent status: {0}".format(e) add_event(AGENT_NAME, version=CURRENT_VERSION, is_success=False, message=message) @@ -510,7 +511,7 @@ class ExtHandlerInstance(object): op=self.operation, is_success=is_success) def download(self): - self.logger.info("Download extension package") + self.logger.verbose("Download extension package") 
self.set_operation(WALAEventOperation.Download) if self.pkg is None: raise ExtensionError("No package uri found") @@ -527,7 +528,7 @@ class ExtHandlerInstance(object): if package is None: raise ExtensionError("Failed to download extension") - self.logger.info("Unpack extension package") + self.logger.verbose("Unpack extension package") pkg_file = os.path.join(conf.get_lib_dir(), os.path.basename(uri.uri) + ".zip") try: @@ -569,38 +570,38 @@ class ExtHandlerInstance(object): self.create_handler_env() def enable(self): - self.logger.info("Enable extension.") self.set_operation(WALAEventOperation.Enable) - man = self.load_manifest() - self.launch_command(man.get_enable_command(), timeout=300) + enable_cmd = man.get_enable_command() + self.logger.info("Enable extension [{0}]".format(enable_cmd)) + self.launch_command(enable_cmd, timeout=300) self.set_handler_state(ExtHandlerState.Enabled) self.set_handler_status(status="Ready", message="Plugin enabled") def disable(self): - self.logger.info("Disable extension.") self.set_operation(WALAEventOperation.Disable) - man = self.load_manifest() - self.launch_command(man.get_disable_command(), timeout=900) + disable_cmd = man.get_disable_command() + self.logger.info("Disable extension [{0}]".format(disable_cmd)) + self.launch_command(disable_cmd, timeout=900) self.set_handler_state(ExtHandlerState.Installed) self.set_handler_status(status="NotReady", message="Plugin disabled") def install(self): - self.logger.info("Install extension.") - self.set_operation(WALAEventOperation.Install) - man = self.load_manifest() - self.launch_command(man.get_install_command(), timeout=900) + install_cmd = man.get_install_command() + self.logger.info("Install extension [{0}]".format(install_cmd)) + self.set_operation(WALAEventOperation.Install) + self.launch_command(install_cmd, timeout=900) self.set_handler_state(ExtHandlerState.Installed) def uninstall(self): - self.logger.info("Uninstall extension.") - self.set_operation(WALAEventOperation.UnInstall) - try: + self.set_operation(WALAEventOperation.UnInstall) man = self.load_manifest() - self.launch_command(man.get_uninstall_command()) + uninstall_cmd = man.get_uninstall_command() + self.logger.info("Uninstall extension [{0}]".format(uninstall_cmd)) + self.launch_command(uninstall_cmd) except ExtensionError as e: self.report_event(message=ustr(e), is_success=False) @@ -608,18 +609,19 @@ class ExtHandlerInstance(object): try: base_dir = self.get_base_dir() if os.path.isdir(base_dir): - self.logger.info("Remove ext handler dir: {0}", base_dir) + self.logger.info("Remove extension handler directory: {0}", + base_dir) shutil.rmtree(base_dir) except IOError as e: - message = "Failed to rm ext handler dir: {0}".format(e) + message = "Failed to remove extension handler directory: {0}".format(e) self.report_event(message=message, is_success=False) def update(self): - self.logger.info("Update extension.") self.set_operation(WALAEventOperation.Update) - man = self.load_manifest() - self.launch_command(man.get_update_command(), timeout=900) + update_cmd = man.get_update_command() + self.logger.info("Update extension [{0}]".format(update_cmd)) + self.launch_command(update_cmd, timeout=900) def update_with_install(self): man = self.load_manifest() @@ -709,23 +711,24 @@ class ExtHandlerInstance(object): heartbeat = json.loads(heartbeat_json)[0]['heartbeat'] except IOError as e: raise ExtensionError("Failed to get heartbeat file:{0}".format(e)) - except ValueError as e: + except (ValueError, KeyError) as e: raise 
ExtensionError("Malformed heartbeat file: {0}".format(e)) return heartbeat def is_responsive(self, heartbeat_file): - last_update=int(time.time() - os.stat(heartbeat_file).st_mtime) - return last_update <= 600 # updated within the last 10 min + last_update = int(time.time() - os.stat(heartbeat_file).st_mtime) + return last_update <= 600 # updated within the last 10 min def launch_command(self, cmd, timeout=300): - self.logger.info("Launch command:{0}", cmd) + self.logger.verbose("Launch command: [{0}]", cmd) base_dir = self.get_base_dir() try: devnull = open(os.devnull, 'w') child = subprocess.Popen(base_dir + "/" + cmd, shell=True, cwd=base_dir, - stdout=devnull) + stdout=devnull, + env=os.environ) except Exception as e: #TODO do not catch all exception raise ExtensionError("Failed to launch: {0}, {1}".format(cmd, e)) diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py index 59bc70c..203bb41 100644 --- a/azurelinuxagent/ga/update.py +++ b/azurelinuxagent/ga/update.py @@ -27,6 +27,7 @@ import signal import subprocess import sys import time +import traceback import zipfile import azurelinuxagent.common.conf as conf @@ -40,6 +41,7 @@ from azurelinuxagent.common.exception import UpdateError, ProtocolError from azurelinuxagent.common.future import ustr from azurelinuxagent.common.osutil import get_osutil from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol from azurelinuxagent.common.utils.flexible_version import FlexibleVersion from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, AGENT_LONG_VERSION, \ AGENT_DIR_GLOB, AGENT_PKG_GLOB, \ @@ -49,7 +51,6 @@ from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, AGENT_LONG from azurelinuxagent.ga.exthandlers import HandlerManifest - AGENT_ERROR_FILE = "error.json" # File name for agent error record AGENT_MANIFEST_FILE = "HandlerManifest.json" @@ -140,14 +141,24 @@ class UpdateHandler(object): cmds, cwd=agent_dir, stdout=sys.stdout, - stderr=sys.stderr) + stderr=sys.stderr, + env=os.environ) + + logger.verbose(u"Agent {0} launched with command '{1}'", agent_name, agent_cmd) - logger.info(u"Agent {0} launched with command '{1}'", agent_name, agent_cmd) + # If the most current agent is the installed agent and update is enabled, + # assume updates are likely available and poll every second. + # This reduces the start-up impact of finding / launching agent updates on + # fresh VMs. 
+ if latest_agent is None and conf.get_autoupdate_enabled(): + poll_interval = 1 + else: + poll_interval = CHILD_POLL_INTERVAL ret = None start_time = time.time() while (time.time() - start_time) < CHILD_HEALTH_INTERVAL: - time.sleep(CHILD_POLL_INTERVAL) + time.sleep(poll_interval) ret = self.child_process.poll() if ret is not None: break @@ -249,6 +260,7 @@ class UpdateHandler(object): except Exception as e: logger.warn(u"Agent {0} failed with exception: {1}", CURRENT_AGENT, ustr(e)) + logger.warn(traceback.format_exc()) sys.exit(1) return @@ -328,7 +340,7 @@ class UpdateHandler(object): return False family = conf.get_autoupdate_gafamily() - logger.info("Checking for agent family {0} updates", family) + logger.verbose("Checking for agent family {0} updates", family) self.last_attempt_time = now try: @@ -348,7 +360,7 @@ class UpdateHandler(object): manifests = [m for m in manifest_list.vmAgentManifests \ if m.family == family and len(m.versionsManifestUris) > 0] if len(manifests) == 0: - logger.info(u"Incarnation {0} has no agent family {1} updates", etag, family) + logger.verbose(u"Incarnation {0} has no agent family {1} updates", etag, family) return False try: @@ -595,7 +607,7 @@ class GuestAgent(object): self.version = FlexibleVersion(version) location = u"disk" if path is not None else u"package" - logger.info(u"Instantiating Agent {0} from {1}", self.name, location) + logger.verbose(u"Instantiating Agent {0} from {1}", self.name, location) self.error = None self._load_error() @@ -651,14 +663,14 @@ class GuestAgent(object): def _ensure_downloaded(self): try: - logger.info(u"Ensuring Agent {0} is downloaded", self.name) + logger.verbose(u"Ensuring Agent {0} is downloaded", self.name) if self.is_blacklisted: - logger.info(u"Agent {0} is blacklisted - skipping download", self.name) + logger.warn(u"Agent {0} is blacklisted - skipping download", self.name) return if self.is_downloaded: - logger.info(u"Agent {0} was previously downloaded - skipping download", self.name) + logger.verbose(u"Agent {0} was previously downloaded - skipping download", self.name) self._load_manifest() return @@ -672,7 +684,7 @@ class GuestAgent(object): self._load_error() msg = u"Agent {0} downloaded successfully".format(self.name) - logger.info(msg) + logger.verbose(msg) add_event( AGENT_NAME, version=self.version, @@ -698,18 +710,24 @@ class GuestAgent(object): def _download(self): for uri in self.pkg.uris: - if self._fetch(uri.uri): + if not HostPluginProtocol.is_default_channel() and self._fetch(uri.uri): break - else: - if self.host is not None and self.host.ensure_initialized(): + elif self.host is not None and self.host.ensure_initialized(): + if not HostPluginProtocol.is_default_channel(): logger.warn("Download unsuccessful, falling back to host plugin") - uri, headers = self.host.get_artifact_request(uri.uri, self.host.manifest_uri) - if uri is not None \ - and headers is not None \ - and self._fetch(uri, headers=headers): - break else: - logger.warn("Download unsuccessful, host plugin not available") + logger.verbose("Using host plugin as default channel") + + uri, headers = self.host.get_artifact_request(uri.uri, self.host.manifest_uri) + if self._fetch(uri, headers=headers): + if not HostPluginProtocol.is_default_channel(): + logger.verbose("Setting host plugin as default channel") + HostPluginProtocol.set_default_channel(True) + break + else: + logger.warn("Host plugin download unsuccessful") + else: + logger.error("No download channels available") if not os.path.isfile(self.get_agent_pkg_path()): msg 
= u"Unable to download Agent {0} from any URI".format(self.name) @@ -731,7 +749,10 @@ class GuestAgent(object): fileutil.write_file(self.get_agent_pkg_path(), bytearray(package), asbin=True) - logger.info(u"Agent {0} downloaded from {1}", self.name, uri) + logger.verbose(u"Agent {0} downloaded from {1}", self.name, uri) + else: + logger.verbose("Fetch was unsuccessful [{0}]", + HostPluginProtocol.read_response_error(resp)) except restutil.HttpError as http_error: logger.verbose(u"Agent {0} download from {1} failed [{2}]", self.name, @@ -744,7 +765,7 @@ class GuestAgent(object): if self.error is None: self.error = GuestAgentError(self.get_agent_error_file()) self.error.load() - logger.info(u"Agent {0} error state: {1}", self.name, ustr(self.error)) + logger.verbose(u"Agent {0} error state: {1}", self.name, ustr(self.error)) except Exception as e: logger.warn(u"Agent {0} failed loading error state: {1}", self.name, ustr(e)) return @@ -780,7 +801,7 @@ class GuestAgent(object): ustr(e)) raise UpdateError(msg) - logger.info( + logger.verbose( u"Agent {0} loaded manifest from {1}", self.name, self.get_agent_manifest_path()) @@ -810,7 +831,7 @@ class GuestAgent(object): self.get_agent_dir()) raise UpdateError(msg) - logger.info( + logger.verbose( u"Agent {0} unpacked successfully to {1}", self.name, self.get_agent_dir()) diff --git a/azurelinuxagent/pa/provision/default.py b/azurelinuxagent/pa/provision/default.py index a99a006..3a3f36f 100644 --- a/azurelinuxagent/pa/provision/default.py +++ b/azurelinuxagent/pa/provision/default.py @@ -87,10 +87,9 @@ class ProvisionHandler(object): thumbprint = self.get_ssh_host_key_thumbprint(keypair_type) return thumbprint - def get_ssh_host_key_thumbprint(self, keypair_type): - cmd = "ssh-keygen -lf /etc/ssh/ssh_host_{0}_key.pub".format( - keypair_type) - ret = shellutil.run_get_output(cmd) + def get_ssh_host_key_thumbprint(self, keypair_type, chk_err=True): + cmd = "ssh-keygen -lf /etc/ssh/ssh_host_{0}_key.pub".format(keypair_type) + ret = shellutil.run_get_output(cmd, chk_err=chk_err) if ret[0] == 0: return ret[1].rstrip().split()[1].replace(':', '') else: diff --git a/azurelinuxagent/pa/provision/ubuntu.py b/azurelinuxagent/pa/provision/ubuntu.py index a71df37..66866b2 100644 --- a/azurelinuxagent/pa/provision/ubuntu.py +++ b/azurelinuxagent/pa/provision/ubuntu.py @@ -91,7 +91,7 @@ class UbuntuProvisionHandler(ProvisionHandler): if os.path.isfile(path): logger.info("ssh host key found at: {0}".format(path)) try: - thumbprint = self.get_ssh_host_key_thumbprint(keypair_type) + thumbprint = self.get_ssh_host_key_thumbprint(keypair_type, chk_err=False) logger.info("Thumbprint obtained from : {0}".format(path)) return thumbprint except ProvisionError: diff --git a/azurelinuxagent/pa/rdma/centos.py b/azurelinuxagent/pa/rdma/centos.py index 214f9ea..10b7c81 100644 --- a/azurelinuxagent/pa/rdma/centos.py +++ b/azurelinuxagent/pa/rdma/centos.py @@ -82,8 +82,7 @@ class CentOSRDMAHandler(RDMAHandler): # Example match (pkg name, -, followed by 3 segments, fw_version and -): # - pkg=microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64 # - fw_version=142 - pattern = '{0}-\d\.\d\.\d\.({1})-'.format( - self.rdma_user_mode_package_name, fw_version) + pattern = '{0}-(\d+\.){{3,}}({1})-'.format(self.rdma_user_mode_package_name, fw_version) return re.match(pattern, pkg) @staticmethod @@ -156,7 +155,7 @@ class CentOSRDMAHandler(RDMAHandler): # Install kernel mode driver (kmod-microsoft-hyper-v-rdma-*) kmod_pkg = self.get_file_by_pattern( - pkgs, 
"%s-\d\.\d\.\d\.+(%s)-\d{8}\.x86_64.rpm" % (self.rdma_kernel_mode_package_name, fw_version)) + pkgs, "%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_kernel_mode_package_name, fw_version)) if not kmod_pkg: raise Exception("RDMA kernel mode package not found") kmod_pkg_path = os.path.join(pkg_dir, kmod_pkg) @@ -165,7 +164,7 @@ class CentOSRDMAHandler(RDMAHandler): # Install user mode driver (microsoft-hyper-v-rdma-*) umod_pkg = self.get_file_by_pattern( - pkgs, "%s-\d\.\d\.\d\.+(%s)-\d{8}\.x86_64.rpm" % (self.rdma_user_mode_package_name, fw_version)) + pkgs, "%s-(\d+\.){3,}(%s)-\d{8}\.x86_64.rpm" % (self.rdma_user_mode_package_name, fw_version)) if not umod_pkg: raise Exception("RDMA user mode package not found") umod_pkg_path = os.path.join(pkg_dir, umod_pkg) diff --git a/bin/waagent2.0 b/bin/waagent2.0 index 80af1c7..1a72ba7 100644 --- a/bin/waagent2.0 +++ b/bin/waagent2.0 @@ -1094,6 +1094,18 @@ class centosDistro(redhatDistro): super(centosDistro,self).__init__() ############################################################ +# eulerosDistro +############################################################ + +class eulerosDistro(redhatDistro): + """ + EulerOS Distro concrete class + Put EulerOS specific behavior here... + """ + def __init__(self): + super(eulerosDistro,self).__init__() + +############################################################ # oracleDistro ############################################################ @@ -6016,6 +6028,8 @@ def DistInfo(fullname=0): if 'linux_distribution' in dir(platform): distinfo = list(platform.linux_distribution(full_distribution_name=fullname)) distinfo[0] = distinfo[0].strip() # remove trailing whitespace in distro name + if os.path.exists("/etc/euleros-release"): + distinfo[0] = "euleros" return distinfo else: return platform.dist() diff --git a/config/66-azure-storage.rules b/config/66-azure-storage.rules index ab30628..5b2b799 100644 --- a/config/66-azure-storage.rules +++ b/config/66-azure-storage.rules @@ -1,12 +1,22 @@ -ACTION!="add|change", GOTO="azure_end" -SUBSYSTEM!="block", GOTO="azure_end" -ATTRS{ID_VENDOR}!="Msft", GOTO="azure_end" -ATTRS{ID_MODEL}!="Virtual_Disk", GOTO="azure_end" +ACTION=="add|change", SUBSYSTEM=="block", ENV{ID_VENDOR}=="Msft", ENV{ID_MODEL}=="Virtual_Disk", GOTO="azure_disk" +GOTO="azure_end" +LABEL="azure_disk" # Root has a GUID of 0000 as the second value # The resource/resource has GUID of 0001 as the second value ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="root", GOTO="azure_names" ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="resource", GOTO="azure_names" +# Wellknown SCSI controllers +ATTRS{device_id}=="{f8b3781a-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi0", GOTO="azure_datadisk" +ATTRS{device_id}=="{f8b3781b-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi1", GOTO="azure_datadisk" +ATTRS{device_id}=="{f8b3781c-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi2", GOTO="azure_datadisk" +ATTRS{device_id}=="{f8b3781d-1e82-4818-a1c3-63d806ec15bb}", ENV{fabric_scsi_controller}="scsi3", GOTO="azure_datadisk" +GOTO="azure_end" + +# Retrieve LUN number for datadisks +LABEL="azure_datadisk" +ENV{DEVTYPE}=="partition", PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/../device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result", GOTO="azure_names" +PROGRAM="/bin/sh -c 'readlink /sys/class/block/%k/device|cut -d: -f4'", ENV{fabric_name}="$env{fabric_scsi_controller}/lun$result", GOTO="azure_names" GOTO="azure_end" # 
Create the symlinks @@ -15,4 +25,3 @@ ENV{DEVTYPE}=="disk", SYMLINK+="disk/azure/$env{fabric_name}" ENV{DEVTYPE}=="partition", SYMLINK+="disk/azure/$env{fabric_name}-part%n" LABEL="azure_end" - diff --git a/debian/changelog b/debian/changelog index d9ee595..bfb9d5f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +walinuxagent (2.2.9-0ubuntu1) zesty; urgency=medium + + * New upstream release (LP: #1683521). + + -- Ćukasz 'sil2100' Zemczak <lukasz.zemczak@ubuntu.com> Tue, 18 Apr 2017 12:27:57 +0200 + walinuxagent (2.2.6-0ubuntu1) zesty; urgency=medium * New upstream release (LP: #1661750). diff --git a/tests/common/test_event.py b/tests/common/test_event.py new file mode 100644 index 0000000..f535411 --- /dev/null +++ b/tests/common/test_event.py @@ -0,0 +1,91 @@ +# Copyright 2017 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from __future__ import print_function + +from azurelinuxagent.common.event import init_event_logger, add_event +from azurelinuxagent.common.future import ustr +from tests.tools import * + + +class TestEvent(AgentTestCase): + def test_save_event(self): + tmp_evt = tempfile.mkdtemp() + init_event_logger(tmp_evt) + add_event('test', message='test event') + self.assertTrue(len(os.listdir(tmp_evt)) == 1) + shutil.rmtree(tmp_evt) + + def test_save_event_rollover(self): + tmp_evt = tempfile.mkdtemp() + init_event_logger(tmp_evt) + add_event('test', message='first event') + for i in range(0, 999): + add_event('test', message='test event {0}'.format(i)) + + events = os.listdir(tmp_evt) + events.sort() + self.assertTrue(len(events) == 1000) + + first_event = os.path.join(tmp_evt, events[0]) + with open(first_event) as first_fh: + first_event_text = first_fh.read() + self.assertTrue('first event' in first_event_text) + + add_event('test', message='last event') + events = os.listdir(tmp_evt) + events.sort() + self.assertTrue(len(events) == 1000, "{0} events found, 1000 expected".format(len(events))) + + first_event = os.path.join(tmp_evt, events[0]) + with open(first_event) as first_fh: + first_event_text = first_fh.read() + self.assertFalse('first event' in first_event_text) + self.assertTrue('test event 0' in first_event_text) + + last_event = os.path.join(tmp_evt, events[-1]) + with open(last_event) as last_fh: + last_event_text = last_fh.read() + self.assertTrue('last event' in last_event_text) + + shutil.rmtree(tmp_evt) + + def test_save_event_cleanup(self): + tmp_evt = tempfile.mkdtemp() + init_event_logger(tmp_evt) + + for i in range(0, 2000): + evt = os.path.join(tmp_evt, '{0}.tld'.format(ustr(1491004920536531 + i))) + with open(evt, 'w') as fh: + fh.write('test event {0}'.format(i)) + + events = os.listdir(tmp_evt) + self.assertTrue(len(events) == 2000, "{0} events found, 2000 expected".format(len(events))) + add_event('test', message='last event') + + events = os.listdir(tmp_evt) + events.sort() + self.assertTrue(len(events) == 1000, "{0} events found, 1000 
expected".format(len(events))) + first_event = os.path.join(tmp_evt, events[0]) + with open(first_event) as first_fh: + first_event_text = first_fh.read() + self.assertTrue('test event 1001' in first_event_text) + + last_event = os.path.join(tmp_evt, events[-1]) + with open(last_event) as last_fh: + last_event_text = last_fh.read() + self.assertTrue('last event' in last_event_text) diff --git a/tests/common/test_version.py b/tests/common/test_version.py index df4f0e3..c01430b 100644 --- a/tests/common/test_version.py +++ b/tests/common/test_version.py @@ -21,8 +21,9 @@ import textwrap import mock -import azurelinuxagent.common.version as version -from azurelinuxagent.common.version import * +from azurelinuxagent.common.version import set_current_agent, \ + AGENT_LONG_VERSION, AGENT_VERSION, AGENT_NAME, AGENT_NAME_PATTERN, \ + get_f5_platform from tests.tools import * @@ -67,6 +68,7 @@ class TestCurrentAgentName(AgentTestCase): self.assertEqual(version, str(current_version)) return + class TestGetF5Platforms(AgentTestCase): def test_get_f5_platform_bigip_12_1_1(self): version_file = textwrap.dedent(""" @@ -83,7 +85,7 @@ class TestGetF5Platforms(AgentTestCase): mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): - platform = version.get_f5_platform() + platform = get_f5_platform() self.assertTrue(platform[0] == 'bigip') self.assertTrue(platform[1] == '12.1.1') self.assertTrue(platform[2] == 'bigip') @@ -104,7 +106,7 @@ class TestGetF5Platforms(AgentTestCase): mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): - platform = version.get_f5_platform() + platform = get_f5_platform() self.assertTrue(platform[0] == 'bigip') self.assertTrue(platform[1] == '12.1.0') self.assertTrue(platform[2] == 'bigip') @@ -125,7 +127,7 @@ class TestGetF5Platforms(AgentTestCase): mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): - platform = version.get_f5_platform() + platform = get_f5_platform() self.assertTrue(platform[0] == 'bigip') self.assertTrue(platform[1] == '12.0.0') self.assertTrue(platform[2] == 'bigip') @@ -146,7 +148,7 @@ class TestGetF5Platforms(AgentTestCase): mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): - platform = version.get_f5_platform() + platform = get_f5_platform() self.assertTrue(platform[0] == 'iworkflow') self.assertTrue(platform[1] == '2.0.1') self.assertTrue(platform[2] == 'iworkflow') @@ -167,7 +169,7 @@ class TestGetF5Platforms(AgentTestCase): mo = mock.mock_open(read_data=version_file) with patch(open_patch(), mo): - platform = version.get_f5_platform() + platform = get_f5_platform() self.assertTrue(platform[0] == 'bigiq') self.assertTrue(platform[1] == '5.1.0') self.assertTrue(platform[2] == 'bigiq') diff --git a/tests/daemon/test_daemon.py b/tests/daemon/test_daemon.py index 77b4e3e..dd31fd7 100644 --- a/tests/daemon/test_daemon.py +++ b/tests/daemon/test_daemon.py @@ -14,8 +14,7 @@ # # Requires Python 2.4+ and Openssl 1.0+ # - -from azurelinuxagent.daemon import * +from azurelinuxagent.daemon import get_daemon_handler from tests.tools import * diff --git a/tests/data/ga/WALinuxAgent-2.2.4.zip b/tests/data/ga/WALinuxAgent-2.2.4.zip Binary files differdeleted file mode 100644 index fd48991..0000000 --- a/tests/data/ga/WALinuxAgent-2.2.4.zip +++ /dev/null diff --git a/tests/data/ga/WALinuxAgent-2.2.8.zip b/tests/data/ga/WALinuxAgent-2.2.8.zip Binary files differnew file mode 100644 index 0000000..04c60a8 --- /dev/null +++ b/tests/data/ga/WALinuxAgent-2.2.8.zip diff --git 
a/tests/ga/test_update.py b/tests/ga/test_update.py index 5277e59..e7a7af4 100644 --- a/tests/ga/test_update.py +++ b/tests/ga/test_update.py @@ -522,7 +522,8 @@ class TestGuestAgent(UpdateTestCase): self.assertFalse(os.path.isdir(self.agent_path)) mock_http_get.return_value = ResponseMock( - status=restutil.httpclient.SERVICE_UNAVAILABLE) + status=restutil.httpclient.SERVICE_UNAVAILABLE, + response="") ext_uri = 'ext_uri' host_uri = 'host_uri' @@ -1039,7 +1040,7 @@ class TestUpdate(UpdateTestCase): with patch('subprocess.Popen', return_value=mock_child) as mock_popen: with patch('time.time', side_effect=mock_time.time): - with patch('time.sleep', return_value=mock_time.sleep): + with patch('time.sleep', side_effect=mock_time.sleep): self.update_handler.run_latest() self.assertEqual(1, mock_popen.call_count) @@ -1082,7 +1083,22 @@ class TestUpdate(UpdateTestCase): self._test_run_latest(mock_child=mock_child, mock_time=mock_time) self.assertEqual(1, mock_child.poll.call_count) self.assertEqual(0, mock_child.wait.call_count) - self.assertEqual(2, mock_time.time_call_count) + return + + def test_run_latest_polls_frequently_if_installed_is_latest(self): + mock_child = ChildMock(return_value=0) + mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/2) + self._test_run_latest(mock_time=mock_time) + self.assertEqual(1, mock_time.sleep_interval) + return + + def test_run_latest_polls_moderately_if_installed_not_latest(self): + self.prepare_agents() + + mock_child = ChildMock(return_value=0) + mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/2) + self._test_run_latest(mock_time=mock_time) + self.assertNotEqual(1, mock_time.sleep_interval) return def test_run_latest_defaults_to_current(self): @@ -1137,7 +1153,6 @@ class TestUpdate(UpdateTestCase): return def test_run_latest_exception_blacklists(self): - # logger.add_logger_appender(logger.AppenderType.STDOUT) self.prepare_agents() latest_agent = self.update_handler.get_latest_agent() @@ -1371,9 +1386,10 @@ class ProtocolMock(object): class ResponseMock(Mock): - def __init__(self, status=restutil.httpclient.OK, response=None): + def __init__(self, status=restutil.httpclient.OK, response=None, reason=None): Mock.__init__(self) self.status = status + self.reason = reason self.response = response return @@ -1388,7 +1404,11 @@ class TimeMock(Mock): self.time_call_count = 0 self.time_increment = time_increment - self.sleep = Mock(return_value=0) + self.sleep_interval = None + return + + def sleep(self, n): + self.sleep_interval = n return def time(self): diff --git a/tests/protocol/test_hostplugin.py b/tests/protocol/test_hostplugin.py index 1298fdc..ef91998 100644 --- a/tests/protocol/test_hostplugin.py +++ b/tests/protocol/test_hostplugin.py @@ -15,123 +15,314 @@ # Requires Python 2.4+ and Openssl 1.0+ # -import unittest +import base64 +import json +import sys + + + +if sys.version_info[0] == 3: + import http.client as httpclient + bytebuffer = memoryview +elif sys.version_info[0] == 2: + import httplib as httpclient + bytebuffer = buffer import azurelinuxagent.common.protocol.restapi as restapi import azurelinuxagent.common.protocol.wire as wire import azurelinuxagent.common.protocol.hostplugin as hostplugin + +from azurelinuxagent.common import event +from azurelinuxagent.common.protocol.hostplugin import API_VERSION +from azurelinuxagent.common.utils import restutil + from tests.protocol.mockwiredata import WireProtocolData, DATA_FILE from tests.tools import * -wireserver_url = "168.63.129.16" +hostplugin_status_url = 
"http://168.63.129.16:32526/status" sas_url = "http://sas_url" -testtype = 'BlockBlob' +wireserver_url = "168.63.129.16" + +block_blob_type = 'BlockBlob' +page_blob_type = 'PageBlob' + api_versions = '["2015-09-01"]' +storage_version = "2014-02-14" +faux_status = "{ 'dummy' : 'data' }" +faux_status_b64 = base64.b64encode(bytes(bytearray(faux_status, encoding='utf-8'))) +if PY_VERSION_MAJOR > 2: + faux_status_b64 = faux_status_b64.decode('utf-8') class TestHostPlugin(AgentTestCase): + + def _compare_data(self, actual, expected): + for k in iter(expected.keys()): + if k == 'content' or k == 'requestUri': + if actual[k] != expected[k]: + print("Mismatch: Actual '{0}'='{1}', " \ + "Expected '{0}'='{3}'".format( + k, actual[k], expected[k])) + return False + elif k == 'headers': + for h in expected['headers']: + if not (h in actual['headers']): + print("Missing Header: '{0}'".format(h)) + return False + else: + print("Unexpected Key: '{0}'".format(k)) + return False + return True + + def _hostplugin_data(self, blob_headers, content=None): + headers = [] + for name in iter(blob_headers.keys()): + headers.append({ + 'headerName': name, + 'headerValue': blob_headers[name] + }) + + data = { + 'requestUri': sas_url, + 'headers': headers + } + if not content is None: + s = base64.b64encode(bytes(content)) + if PY_VERSION_MAJOR > 2: + s = s.decode('utf-8') + data['content'] = s + return data + + def _hostplugin_headers(self, goal_state): + return { + 'x-ms-version': '2015-09-01', + 'Content-type': 'application/json', + 'x-ms-containerid': goal_state.container_id, + 'x-ms-host-config-name': goal_state.role_config_name + } + + def _validate_hostplugin_args(self, args, goal_state, exp_method, exp_url, exp_data): + args, kwargs = args + self.assertEqual(exp_method, args[0]) + self.assertEqual(exp_url, args[1]) + self.assertTrue(self._compare_data(json.loads(args[2]), exp_data)) + + headers = kwargs['headers'] + self.assertEqual(headers['x-ms-containerid'], goal_state.container_id) + self.assertEqual(headers['x-ms-host-config-name'], goal_state.role_config_name) + def test_fallback(self): """ - Validate fallback to upload status using HostGAPlugin is happening when status reporting via - default method is unsuccessful + Validate fallback to upload status using HostGAPlugin is happening when + status reporting via default method is unsuccessful """ test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) - - with patch.object(wire.HostPluginProtocol, "put_vm_status") as patch_put: + status = restapi.VMStatus(status="Ready", message="Guest Agent is running") + with patch.object(wire.HostPluginProtocol, + "ensure_initialized", + return_value=True): with patch.object(wire.StatusBlob, "upload", return_value=False) as patch_upload: - wire_protocol_client = wire.WireProtocol(wireserver_url).client - wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) - wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) - wire_protocol_client.ext_conf.status_upload_blob = sas_url - wire_protocol_client.upload_status_blob() - self.assertTrue(patch_put.call_count == 1, - "Fallback was not engaged") - self.assertTrue(patch_put.call_args[0][1] == sas_url) + with patch.object(wire.HostPluginProtocol, + "_put_page_blob_status") as patch_put: + wire_protocol_client = wire.WireProtocol(wireserver_url).client + wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) + wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) + wire_protocol_client.ext_conf.status_upload_blob 
     def test_fallback(self):
         """
-        Validate fallback to upload status using HostGAPlugin is happening when status reporting via
-        default method is unsuccessful
+        Validate fallback to upload status using HostGAPlugin is happening when
+        status reporting via default method is unsuccessful
         """
         test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)
-
-        with patch.object(wire.HostPluginProtocol, "put_vm_status") as patch_put:
+        status = restapi.VMStatus(status="Ready", message="Guest Agent is running")
+        with patch.object(wire.HostPluginProtocol,
+                          "ensure_initialized",
+                          return_value=True):
             with patch.object(wire.StatusBlob, "upload", return_value=False) as patch_upload:
-                wire_protocol_client = wire.WireProtocol(wireserver_url).client
-                wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state)
-                wire_protocol_client.ext_conf = wire.ExtensionsConfig(None)
-                wire_protocol_client.ext_conf.status_upload_blob = sas_url
-                wire_protocol_client.upload_status_blob()
-                self.assertTrue(patch_put.call_count == 1, "Fallback was not engaged")
-                self.assertTrue(patch_put.call_args[0][1] == sas_url)
+                with patch.object(wire.HostPluginProtocol,
+                                  "_put_page_blob_status") as patch_put:
+                    wire_protocol_client = wire.WireProtocol(wireserver_url).client
+                    wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state)
+                    wire_protocol_client.ext_conf = wire.ExtensionsConfig(None)
+                    wire_protocol_client.ext_conf.status_upload_blob = sas_url
+                    wire_protocol_client.status_blob.set_vm_status(status)
+                    wire_protocol_client.upload_status_blob()
+                    self.assertTrue(patch_put.call_count == 1,
+                                    "Fallback was not engaged")
+                    self.assertTrue(patch_put.call_args[0][0] == sas_url)
+                    self.assertTrue(wire.HostPluginProtocol.is_default_channel())
+                    wire.HostPluginProtocol.set_default_channel(False)
+
+    def test_fallback_failure(self):
+        """
+        Validate that when host plugin fails, the default channel is reset
+        """
+        test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)
+        status = restapi.VMStatus(status="Ready",
+                                  message="Guest Agent is running")
+        with patch.object(wire.HostPluginProtocol,
+                          "ensure_initialized",
+                          return_value=True):
+            with patch.object(wire.StatusBlob,
+                              "upload",
+                              return_value=False):
+                with patch.object(wire.HostPluginProtocol,
+                                  "_put_page_blob_status",
+                                  side_effect=wire.HttpError("put failure")) as patch_put:
+                    client = wire.WireProtocol(wireserver_url).client
+                    client.get_goal_state = Mock(return_value=test_goal_state)
+                    client.ext_conf = wire.ExtensionsConfig(None)
+                    client.ext_conf.status_upload_blob = sas_url
+                    client.status_blob.set_vm_status(status)
+                    client.upload_status_blob()
+                    self.assertTrue(patch_put.call_count == 1,
+                                    "Fallback was not engaged")
+                    self.assertFalse(wire.HostPluginProtocol.is_default_channel())
+
+    def test_put_status_error_reporting(self):
+        """
+        Validate the telemetry when uploading status fails
+        """
+        test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state)
+        status = restapi.VMStatus(status="Ready",
+                                  message="Guest Agent is running")
+        with patch.object(wire.StatusBlob,
+                          "upload",
+                          return_value=False):
+            wire_protocol_client = wire.WireProtocol(wireserver_url).client
+            wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state)
+            wire_protocol_client.ext_conf = wire.ExtensionsConfig(None)
+            wire_protocol_client.ext_conf.status_upload_blob = sas_url
+            wire_protocol_client.status_blob.set_vm_status(status)
+            put_error = wire.HttpError("put status http error")
+            with patch.object(event,
+                              "add_event") as patch_add_event:
+                with patch.object(restutil,
+                                  "http_put",
+                                  side_effect=put_error) as patch_http_put:
+                    with patch.object(wire.HostPluginProtocol,
+                                      "ensure_initialized", return_value=True):
+                        wire_protocol_client.upload_status_blob()
+                        self.assertFalse(wire.HostPluginProtocol.is_default_channel())
+                        self.assertTrue(patch_add_event.call_count == 1)
+
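The assertions above, and the _validate_hostplugin_args helper before them, rely on how unittest.mock records calls: each entry of call_args_list unpacks into an (args, kwargs) pair. A tiny standalone illustration follows; the send() and report() functions are made up for the example and only stand in for the patched HTTP call.

```python
import json
from unittest.mock import patch


def send(method, url, data, headers=None):
    # Stand-in for the patched HTTP helper; never actually called in the test.
    raise NotImplementedError


def report(payload):
    send('PUT', 'http://example/status', json.dumps(payload),
         headers={'x-ms-version': '2015-09-01'})


if __name__ == "__main__":
    with patch(__name__ + '.send') as mock_send:
        report({'dummy': 'data'})

    # Each recorded call unpacks into positional args and keyword args.
    args, kwargs = mock_send.call_args_list[0]
    assert args[0] == 'PUT'
    assert args[1] == 'http://example/status'
    assert json.loads(args[2]) == {'dummy': 'data'}
    assert kwargs['headers']['x-ms-version'] == '2015-09-01'
    print("ok")
```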
"http_request") as patch_http: - wire_protocol_client = wire.WireProtocol(wireserver_url).client wire_protocol_client.get_goal_state = Mock(return_value=test_goal_state) plugin = wire_protocol_client.get_host_plugin() - blob = wire_protocol_client.status_blob - blob.vm_status = restapi.VMStatus(message="Ready", status="Ready") - blob.data = '{"dummy": "data"}' + with patch.object(plugin, 'get_api_versions') as patch_api: patch_api.return_value = API_VERSION - plugin.put_vm_status(blob, sas_url, testtype) - self.assertTrue(patch_http.call_count == 1) - self.assertTrue(patch_http.call_args[0][0] == exp_method) - self.assertTrue(patch_http.call_args[0][1] == exp_url) - self.assertTrue(patch_http.call_args[0][2] == exp_data) + plugin.put_vm_status(status_blob, sas_url, block_blob_type) - # Assert headers - headers = patch_http.call_args[1]['headers'] - self.assertEqual(headers['x-ms-containerid'], test_goal_state.container_id) - self.assertEqual(headers['x-ms-host-config-name'], test_goal_state.role_config_name) + self.assertTrue(patch_http.call_count == 1) + self._validate_hostplugin_args( + patch_http.call_args_list[0], + test_goal_state, + exp_method, exp_url, exp_data) def test_no_fallback(self): """ - Validate fallback to upload status using HostGAPlugin is not happening when status reporting via - default method is successful + Validate fallback to upload status using HostGAPlugin is not happening + when status reporting via default method is successful """ - with patch.object(wire.HostPluginProtocol, - "put_vm_status") as patch_put: + vmstatus = restapi.VMStatus(message="Ready", status="Ready") + with patch.object(wire.HostPluginProtocol, "put_vm_status") as patch_put: with patch.object(wire.StatusBlob, "upload") as patch_upload: patch_upload.return_value = True wire_protocol_client = wire.WireProtocol(wireserver_url).client wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) wire_protocol_client.ext_conf.status_upload_blob = sas_url + wire_protocol_client.status_blob.vm_status = vmstatus wire_protocol_client.upload_status_blob() - self.assertTrue(patch_put.call_count == 0, - "Fallback was engaged") + self.assertTrue(patch_put.call_count == 0, "Fallback was engaged") - def test_validate_http_put(self): + def test_validate_block_blob(self): """Validate correct set of data is sent to HostGAPlugin when reporting VM status""" + wire_protocol_client = wire.WireProtocol(wireserver_url).client test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) - expected_url = "http://168.63.129.16:32526/status" - expected_headers = {'x-ms-version': '2015-09-01', - "Content-type": "application/json", - "x-ms-containerid": test_goal_state.container_id, - "x-ms-host-config-name": test_goal_state.role_config_name} - expected_content = '{"content": "eyJkdW1teSI6ICJkYXRhIn0=", ' \ - '"headers": [{"headerName": "x-ms-version", ' \ - '"headerValue": "2014-02-14"}, ' \ - '{"headerName": "x-ms-blob-type", "headerValue": ' \ - '"BlockBlob"}], ' \ - '"requestUri": "http://sas_url"}' host_client = wire.HostPluginProtocol(wireserver_url, test_goal_state.container_id, test_goal_state.role_config_name) self.assertFalse(host_client.is_initialized) self.assertTrue(host_client.api_versions is None) - status_blob = wire.StatusBlob(None) + + status_blob = wire_protocol_client.status_blob + status_blob.data = faux_status + status_blob.type = block_blob_type status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready") - status_blob.data = '{"dummy": "data"}' - status_blob.type = 
"BlockBlob" - with patch.object(wire.HostPluginProtocol, + + exp_method = 'PUT' + exp_url = hostplugin_status_url + exp_data = self._hostplugin_data( + status_blob.get_block_blob_headers(len(faux_status)), + bytearray(faux_status, encoding='utf-8')) + + with patch.object(restutil, "http_request") as patch_http: + with patch.object(wire.HostPluginProtocol, "get_api_versions") as patch_get: - patch_get.return_value = api_versions - with patch.object(restapi.restutil, "http_put") as patch_put: - patch_put.return_value = MagicMock() + patch_get.return_value = api_versions + host_client.put_vm_status(status_blob, sas_url) + + self.assertTrue(patch_http.call_count == 1) + self._validate_hostplugin_args( + patch_http.call_args_list[0], + test_goal_state, + exp_method, exp_url, exp_data) + + def test_validate_page_blobs(self): + """Validate correct set of data is sent for page blobs""" + wire_protocol_client = wire.WireProtocol(wireserver_url).client + test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) + + host_client = wire.HostPluginProtocol(wireserver_url, + test_goal_state.container_id, + test_goal_state.role_config_name) + + self.assertFalse(host_client.is_initialized) + self.assertTrue(host_client.api_versions is None) + + status_blob = wire_protocol_client.status_blob + status_blob.data = faux_status + status_blob.type = page_blob_type + status_blob.vm_status = restapi.VMStatus(message="Ready", status="Ready") + + exp_method = 'PUT' + exp_url = hostplugin_status_url + + page_status = bytearray(status_blob.data, encoding='utf-8') + page_size = int((len(page_status) + 511) / 512) * 512 + page_status = bytearray(status_blob.data.ljust(page_size), encoding='utf-8') + page = bytearray(page_size) + page[0: page_size] = page_status[0: len(page_status)] + mock_response = MockResponse('', httpclient.OK) + + with patch.object(restutil, "http_request", + return_value=mock_response) as patch_http: + with patch.object(wire.HostPluginProtocol, + "get_api_versions") as patch_get: + patch_get.return_value = api_versions host_client.put_vm_status(status_blob, sas_url) - self.assertTrue(host_client.is_initialized) - self.assertFalse(host_client.api_versions is None) - self.assertTrue(patch_put.call_count == 1) - self.assertTrue(patch_put.call_args[0][0] == expected_url) - self.assertTrue(patch_put.call_args[1]['data'] == expected_content) - self.assertTrue(patch_put.call_args[1]['headers'] == expected_headers) + + self.assertTrue(patch_http.call_count == 2) + + exp_data = self._hostplugin_data( + status_blob.get_page_blob_create_headers( + page_size)) + self._validate_hostplugin_args( + patch_http.call_args_list[0], + test_goal_state, + exp_method, exp_url, exp_data) + + exp_data = self._hostplugin_data( + status_blob.get_page_blob_page_headers( + 0, page_size), + page) + exp_data['requestUri'] += "?comp=page" + self._validate_hostplugin_args( + patch_http.call_args_list[1], + test_goal_state, + exp_method, exp_url, exp_data) def test_validate_get_extension_artifacts(self): test_goal_state = wire.GoalState(WireProtocolData(DATA_FILE).goal_state) @@ -155,6 +346,7 @@ class TestHostPlugin(AgentTestCase): for k in expected_headers: self.assertTrue(k in actual_headers) self.assertEqual(expected_headers[k], actual_headers[k]) + class MockResponse: def __init__(self, body, status_code): diff --git a/tests/protocol/test_wire.py b/tests/protocol/test_wire.py index e083678..dda7a2b 100644 --- a/tests/protocol/test_wire.py +++ b/tests/protocol/test_wire.py @@ -14,7 +14,7 @@ # # Requires Python 
diff --git a/tests/protocol/test_wire.py b/tests/protocol/test_wire.py
index e083678..dda7a2b 100644
--- a/tests/protocol/test_wire.py
+++ b/tests/protocol/test_wire.py
@@ -14,7 +14,7 @@
 #
 # Requires Python 2.4+ and Openssl 1.0+
 #
-
+from azurelinuxagent.common import event
 from azurelinuxagent.common.protocol.wire import *
 from tests.protocol.mockwiredata import *
 
@@ -148,13 +148,16 @@ class TestWireProtocolGetters(AgentTestCase):
                          host_uri)
 
     def test_upload_status_blob_default(self, *args):
+        vmstatus = VMStatus(message="Ready", status="Ready")
         wire_protocol_client = WireProtocol(wireserver_url).client
         wire_protocol_client.ext_conf = ExtensionsConfig(None)
         wire_protocol_client.ext_conf.status_upload_blob = testurl
+        wire_protocol_client.status_blob.vm_status = vmstatus
 
         with patch.object(WireClient, "get_goal_state") as patch_get_goal_state:
             with patch.object(HostPluginProtocol, "put_vm_status") as patch_host_ga_plugin_upload:
-                with patch.object(StatusBlob, "upload", return_value = True) as patch_default_upload:
+                with patch.object(StatusBlob, "upload", return_value=True) as patch_default_upload:
+                    HostPluginProtocol.set_default_channel(False)
                     wire_protocol_client.upload_status_blob()
 
                     patch_default_upload.assert_called_once_with(testurl)
@@ -162,20 +165,63 @@ class TestWireProtocolGetters(AgentTestCase):
             patch_host_ga_plugin_upload.assert_not_called()
 
     def test_upload_status_blob_host_ga_plugin(self, *args):
+        vmstatus = VMStatus(message="Ready", status="Ready")
         wire_protocol_client = WireProtocol(wireserver_url).client
         wire_protocol_client.ext_conf = ExtensionsConfig(None)
         wire_protocol_client.ext_conf.status_upload_blob = testurl
         wire_protocol_client.ext_conf.status_upload_blob_type = testtype
+        wire_protocol_client.status_blob.vm_status = vmstatus
         goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state)
 
-        with patch.object(HostPluginProtocol, "put_vm_status") as patch_host_ga_plugin_upload:
-            with patch.object(StatusBlob, "upload", return_value=False) as patch_default_upload:
-                wire_protocol_client.get_goal_state = Mock(return_value = goal_state)
-                wire_protocol_client.upload_status_blob()
+        with patch.object(HostPluginProtocol,
+                          "ensure_initialized",
+                          return_value=True):
+            with patch.object(StatusBlob,
+                              "upload",
+                              return_value=False) as patch_default_upload:
+                with patch.object(HostPluginProtocol,
+                                  "_put_block_blob_status") as patch_http:
+                    HostPluginProtocol.set_default_channel(False)
+                    wire_protocol_client.get_goal_state = Mock(return_value=goal_state)
+                    wire_protocol_client.upload_status_blob()
+                    patch_default_upload.assert_called_once_with(testurl)
+                    wire_protocol_client.get_goal_state.assert_called_once()
+                    patch_http.assert_called_once_with(testurl, wire_protocol_client.status_blob)
+                    self.assertTrue(HostPluginProtocol.is_default_channel())
+                    HostPluginProtocol.set_default_channel(False)
+
+    def test_upload_status_blob_error_reporting(self, *args):
+        vmstatus = VMStatus(message="Ready", status="Ready")
+        wire_protocol_client = WireProtocol(wireserver_url).client
+        wire_protocol_client.ext_conf = ExtensionsConfig(None)
+        wire_protocol_client.ext_conf.status_upload_blob = testurl
+        wire_protocol_client.ext_conf.status_upload_blob_type = testtype
+        wire_protocol_client.status_blob.vm_status = vmstatus
+        goal_state = GoalState(WireProtocolData(DATA_FILE).goal_state)
 
-                patch_default_upload.assert_called_once_with(testurl)
-                wire_protocol_client.get_goal_state.assert_called_once()
-                patch_host_ga_plugin_upload.assert_called_once_with(wire_protocol_client.status_blob, testurl, testtype)
+        with patch.object(HostPluginProtocol,
+                          "ensure_initialized",
+                          return_value=True):
+            with patch.object(StatusBlob,
+                              "put_block_blob",
+                              side_effect=HttpError("error")):
+                with patch.object(StatusBlob,
+                                  "get_blob_type",
+                                  return_value='BlockBlob'):
+                    with patch.object(HostPluginProtocol,
+                                      "put_vm_status"):
+                        with patch.object(WireClient,
+                                          "report_blob_type",
+                                          side_effect=MagicMock()):
+                            with patch.object(event,
+                                              "add_event") as patch_add_event:
+                                HostPluginProtocol.set_default_channel(False)
+                                wire_protocol_client.get_goal_state = Mock(return_value=goal_state)
+                                wire_protocol_client.upload_status_blob()
+                                wire_protocol_client.get_goal_state.assert_called_once()
+                                self.assertTrue(patch_add_event.call_count == 1)
+                                self.assertTrue(patch_add_event.call_args_list[0][1]['op'] == 'ReportStatus')
+                                self.assertFalse(HostPluginProtocol.is_default_channel())
 
     def test_get_in_vm_artifacts_profile_blob_not_available(self, *args):
         wire_protocol_client = WireProtocol(wireserver_url).client
@@ -249,6 +295,7 @@ class TestWireProtocolGetters(AgentTestCase):
                 with patch.object(HostPluginProtocol,
                                   "get_artifact_request",
                                   return_value=[host_uri, {}]):
+                    HostPluginProtocol.set_default_channel(False)
                     self.assertRaises(ProtocolError, client.fetch_manifest, uris)
                     self.assertEqual(patch_fetch.call_count, 2)
                     self.assertEqual(patch_fetch.call_args_list[0][0][0], uri1.uri)
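Taken together, these tests exercise a fallback pattern: try the direct blob upload first; if it fails, push the status through the HostGAPlugin and remember that choice as the default channel; if the host plugin itself fails, reset the default channel and emit a telemetry event (op 'ReportStatus'). The following is a simplified, self-contained sketch of that control flow under those assumptions; StatusUploader and its callables are illustrative names, not the agent's actual API.

```python
class StatusUploader:
    """Illustrative fallback logic; not the agent's real implementation."""

    _default_channel_is_host_plugin = False

    def __init__(self, upload_direct, upload_via_host_plugin, report_event):
        # The three callables stand in for the direct blob upload,
        # the host-plugin upload, and the telemetry sink.
        self.upload_direct = upload_direct
        self.upload_via_host_plugin = upload_via_host_plugin
        self.report_event = report_event

    @classmethod
    def is_default_channel(cls):
        return cls._default_channel_is_host_plugin

    @classmethod
    def set_default_channel(cls, value):
        cls._default_channel_is_host_plugin = value

    def upload(self, status, sas_url):
        # Try the direct upload first unless the host plugin has already
        # been promoted to the default channel.
        if not self.is_default_channel():
            if self.upload_direct(sas_url, status):
                return True
        try:
            self.upload_via_host_plugin(sas_url, status)
            self.set_default_channel(True)       # remember the working channel
            return True
        except Exception as e:
            # Host plugin failed too: reset the channel and emit telemetry,
            # mirroring what test_fallback_failure and
            # test_upload_status_blob_error_reporting assert.
            self.set_default_channel(False)
            self.report_event(op="ReportStatus", is_success=False, message=str(e))
            return False


if __name__ == "__main__":
    events = []
    uploader = StatusUploader(
        upload_direct=lambda url, status: False,          # direct upload fails
        upload_via_host_plugin=lambda url, status: None,  # host plugin succeeds
        report_event=lambda **kw: events.append(kw))
    assert uploader.upload("{ 'dummy' : 'data' }", "http://sas_url")
    assert StatusUploader.is_default_channel()            # host plugin promoted
    assert events == []                                   # no failure telemetry
```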