| author | Daniel Watkins <daniel.watkins@canonical.com> | 2016-09-13 16:11:47 +0100 |
|---|---|---|
| committer | usd-importer <ubuntu-server@lists.ubuntu.com> | 2016-09-14 10:39:12 +0000 |
| commit | 5009a9d0f3606fc08a80ec0d59076d8dc48d2f25 (patch) | |
| tree | ad67eef74c5208178950db6ee28195e2137fa713 | |
| parent | 0f7cef5b52162d1ebb31a738bd8fc9febe1fbda6 (diff) | |
| download | vyos-walinuxagent-5009a9d0f3606fc08a80ec0d59076d8dc48d2f25.tar.gz | vyos-walinuxagent-5009a9d0f3606fc08a80ec0d59076d8dc48d2f25.zip |
Import patches-unapplied version 2.1.5-0ubuntu1 to ubuntu/yakkety-proposed
Imported using git-ubuntu import.
Changelog parent: 0f7cef5b52162d1ebb31a738bd8fc9febe1fbda6
New changelog entries:
* New upstream release (LP: #1603581)
  - d/patches/disable-auto-update.patch:
    The new version introduces auto-updating of the agent to its latest
    version via an internal mechanism; disable this.
  - d/patches/fix_shebangs.patch:
    Dropped in favour of the dh_python3 --shebang option.
  - Refreshed d/patches/disable_udev_overrides.patch
143 files changed, 7128 insertions, 1671 deletions
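
The first changelog entry disables the agent's new self-update mechanism. The diff below introduces the switch it acts on: `get_autoupdate_enabled()` in `azurelinuxagent/common/conf.py` reads `AutoUpdate.Enabled`, defaulting to enabled. The following is a minimal sketch — not the agent's real module — of how such a switch gates auto-update and how a downstream patch or a `waagent.conf` entry can turn it off; the `AgentConf` class here is an illustrative stand-in.

```python
# Minimal sketch, not the actual agent code: models how AutoUpdate.Enabled
# (added in this release and defaulted to True upstream) gates self-update.
# The Ubuntu disable-auto-update.patch effectively forces this switch off.

class AgentConf(object):
    """Tiny stand-in for azurelinuxagent.common.conf (illustrative only)."""

    def __init__(self, values=None):
        self.values = values or {}

    def get_switch(self, key, default):
        # waagent.conf switches are spelled "y"/"n"
        raw = self.values.get(key)
        if raw is None:
            return default
        return raw.lower().startswith("y")


def get_autoupdate_enabled(conf, default=True):
    # Upstream default is True; a downstream patch can flip `default`
    # to False, or ship "AutoUpdate.Enabled=n" in waagent.conf.
    return conf.get_switch("AutoUpdate.Enabled", default)


if __name__ == "__main__":
    upstream = AgentConf({})                           # nothing set: auto-update stays on
    patched = AgentConf({"AutoUpdate.Enabled": "n"})   # Ubuntu-style opt-out
    print(get_autoupdate_enabled(upstream))   # True
    print(get_autoupdate_enabled(patched))    # False
```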
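
Much of the diff reworks DHCP handling in `azurelinuxagent/common/dhcp.py` so the agent can skip the DHCP probe entirely when the kernel already has a route to the wire server at 168.63.129.16, which appears in `/proc/net/route` as the little-endian hex entry `10813FA8`. A self-contained sketch of that check, assuming a readable `/proc/net/route`, could look like this:

```python
import os

# Values taken from the new dhcp.py in this diff: 168.63.129.16 is the Azure
# wire server, and 10813FA8 is its little-endian hex form as it appears in
# the kernel routing table.
KNOWN_WIRESERVER_IP = "168.63.129.16"
KNOWN_WIRESERVER_IP_ENTRY = "10813FA8"


def wireserver_route_exists(route_file="/proc/net/route"):
    """Return True if the kernel already has a route to the wire server."""
    try:
        if os.path.exists(route_file):
            with open(route_file) as f:
                return KNOWN_WIRESERVER_IP_ENTRY in f.read()
    except (IOError, OSError) as e:
        print("could not read {0}: {1}".format(route_file, e))
    return False


if __name__ == "__main__":
    if wireserver_route_exists():
        print("route to {0} exists; DHCP probe can be skipped".format(
            KNOWN_WIRESERVER_IP))
    else:
        print("no route to {0}; fall back to DHCP option 245".format(
            KNOWN_WIRESERVER_IP))
```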
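
The diff also adds `get_primary_interface()` to `DefaultOSUtil`, which walks `/proc/net/route` for default-route entries (destination `00000000` with the `RTF_GATEWAY` flag set) and keeps the interface with the lowest metric. A stripped-down sketch of that selection, assuming the standard `/proc/net/route` column layout, is shown below; it is not the agent's exact implementation.

```python
# Sketch of the primary-interface selection added in osutil/default.py:
# scan /proc/net/route for default-route entries and keep the one with the
# lowest Metric. Column positions are read from the header row, as the
# real code does.
RTF_GATEWAY = 0x02          # from linux/route.h
DEFAULT_DEST = "00000000"   # 0.0.0.0, i.e. the default route


def get_primary_interface(route_file="/proc/net/route"):
    primary, primary_metric = "", None
    with open(route_file) as routing_table:
        headers = routing_table.readline().strip().split("\t")
        idx_iface = headers.index("Iface")
        idx_dest = headers.index("Destination")
        idx_flags = headers.index("Flags")
        idx_metric = headers.index("Metric")
        for entry in routing_table:
            route = entry.strip().split("\t")
            # Flags are printed in hex; keep only gateway (default) routes.
            if (route[idx_dest] == DEFAULT_DEST and
                    int(route[idx_flags], 16) & RTF_GATEWAY):
                metric = int(route[idx_metric])
                if primary_metric is None or metric < primary_metric:
                    primary, primary_metric = route[idx_iface], metric
    return primary


if __name__ == "__main__":
    print("primary interface: [{0}]".format(get_primary_interface()))
```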
@@ -2,6 +2,9 @@ __pycache__/ *.py[cod] +# Virtualenv +py3env/ + # C extensions *.so @@ -22,6 +25,9 @@ var/ .installed.cfg *.egg +# PyCharm +.idea/ + # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. @@ -1,5 +1,9 @@ WALinuxAgent Changelog ||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +09 Mar 2016, WALinuxAgent 2.1.4 + . Add support for FreeBSD + . Fix a bug for internal extension version resolving + 29 Jan 2016, WALinuxAgent 2.1.3 . Fixed endpoint probing for Azure Stack . Multiple fixes for extension handling diff --git a/LICENSE-2.0.txt b/LICENSE.txt index d645695..af39a91 100644 --- a/LICENSE-2.0.txt +++ b/LICENSE.txt @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2016 Microsoft Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -61,6 +61,7 @@ http://support.microsoft.com/kb/2805216 Supported Linux Distributions: * CoreOS * CentOS 6.2+ + * Red Hat Enterprise Linux 6.7+ * Debian 7.0+ * Ubuntu 12.04+ * openSUSE 12.3+ @@ -104,7 +105,7 @@ Upgrading via your distribution's package repository is preferred. If upgrading manually, same with installation above by running: - #sudo python setup.py install -force + #sudo python setup.py install --force Restart waagent service,for most of linux distributions: @@ -340,6 +341,12 @@ Type: Boolean Default: n If set, log verbosity is boosted. Waagent logs to /var/log/waagent.log and leverages the system logrotate functionality to rotate logs. +OS.EnableRDMA: +Type: Boolean Default: n + +If set, the agent will attempt to install and then load an RDMA kernel driver +that matches the version of the firmware on the underlying hardware. + OS.RootDeviceScsiTimeout: Type: Integer Default: 300 @@ -528,3 +535,6 @@ version="1.0.0.0" goalStateIncarnation="2"> </Instances> </SharedConfig> ``` + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/azurelinuxagent/__init__.py b/azurelinuxagent/__init__.py index 1ea2f38..2ef4c16 100644 --- a/azurelinuxagent/__init__.py +++ b/azurelinuxagent/__init__.py @@ -14,4 +14,3 @@ # # Requires Python 2.4+ and Openssl 1.0+ # - diff --git a/azurelinuxagent/agent.py b/azurelinuxagent/agent.py index 93e9c16..1309d94 100644 --- a/azurelinuxagent/agent.py +++ b/azurelinuxagent/agent.py @@ -25,48 +25,91 @@ import os import sys import re import subprocess -from azurelinuxagent.metadata import AGENT_NAME, AGENT_LONG_VERSION, \ +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.event as event +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.version import AGENT_NAME, AGENT_LONG_VERSION, \ DISTRO_NAME, DISTRO_VERSION, \ PY_VERSION_MAJOR, PY_VERSION_MINOR, \ PY_VERSION_MICRO - -from azurelinuxagent.distro.loader import get_distro +from azurelinuxagent.common.osutil import get_osutil class Agent(object): def __init__(self, verbose): """ Initialize agent running environment. 
""" - self.distro = get_distro(); - self.distro.init_handler.run(verbose) + self.osutil = get_osutil() + #Init stdout log + level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO + logger.add_logger_appender(logger.AppenderType.STDOUT, level) + + #Init config + conf_file_path = self.osutil.get_agent_conf_file_path() + conf.load_conf_from_file(conf_file_path) + + #Init log + verbose = verbose or conf.get_logs_verbose() + level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO + logger.add_logger_appender(logger.AppenderType.FILE, level, + path="/var/log/waagent.log") + logger.add_logger_appender(logger.AppenderType.CONSOLE, level, + path="/dev/console") + + #Init event reporter + event_dir = os.path.join(conf.get_lib_dir(), "events") + event.init_event_logger(event_dir) + event.enable_unhandled_err_dump("WALA") def daemon(self): """ Run agent daemon """ - self.distro.daemon_handler.run() + from azurelinuxagent.daemon import get_daemon_handler + daemon_handler = get_daemon_handler() + daemon_handler.run() + + def provision(self): + """ + Run provision command + """ + from azurelinuxagent.pa.provision import get_provision_handler + provision_handler = get_provision_handler() + provision_handler.run() def deprovision(self, force=False, deluser=False): """ Run deprovision command """ - self.distro.deprovision_handler.run(force=force, deluser=deluser) + from azurelinuxagent.pa.deprovision import get_deprovision_handler + deprovision_handler = get_deprovision_handler() + deprovision_handler.run(force=force, deluser=deluser) def register_service(self): """ Register agent as a service """ print("Register {0} service".format(AGENT_NAME)) - self.distro.osutil.register_agent_service() + self.osutil.register_agent_service() print("Start {0} service".format(AGENT_NAME)) - self.distro.osutil.start_agent_service() + self.osutil.start_agent_service() + + def run_exthandlers(self): + """ + Run the update and extension handler + """ + from azurelinuxagent.ga.update import get_update_handler + update_handler = get_update_handler() + update_handler.run() -def main(): +def main(args=[]): """ Parse command line arguments, exit with usage() on error. 
Invoke different methods according to different command """ - command, force, verbose = parse_args(sys.argv[1:]) + if len(args) <= 0: + args = sys.argv[1:] + command, force, verbose = parse_args(args) if command == "version": version() elif command == "help": @@ -74,15 +117,22 @@ def main(): elif command == "start": start() else: - agent = Agent(verbose) - if command == "deprovision+user": - agent.deprovision(force, deluser=True) - elif command == "deprovision": - agent.deprovision(force, deluser=False) - elif command == "register-service": - agent.register_service() - elif command == "daemon": - agent.daemon() + try: + agent = Agent(verbose) + if command == "deprovision+user": + agent.deprovision(force, deluser=True) + elif command == "deprovision": + agent.deprovision(force, deluser=False) + elif command == "provision": + agent.provision() + elif command == "register-service": + agent.register_service() + elif command == "daemon": + agent.daemon() + elif command == "run-exthandlers": + agent.run_exthandlers() + except Exception as e: + logger.error(u"Failed to run '{0}': {1}", command, e) def parse_args(sys_args): """ @@ -102,6 +152,8 @@ def parse_args(sys_args): cmd = "start" elif re.match("^([-/]*)register-service", a): cmd = "register-service" + elif re.match("^([-/]*)run-exthandlers", a): + cmd = "run-exthandlers" elif re.match("^([-/]*)version", a): cmd = "version" elif re.match("^([-/]*)verbose", a): @@ -128,8 +180,9 @@ def usage(): Show agent usage """ print("") - print((("usage: {0} [-verbose] [-force] [-help]" - "-deprovision[+user]|-register-service|-version|-daemon|-start]" + print((("usage: {0} [-verbose] [-force] [-help] " + "-deprovision[+user]|-register-service|-version|-daemon|-start|" + "-run-exthandlers]" "").format(sys.argv[0]))) print("") @@ -141,3 +194,5 @@ def start(): devnull = open(os.devnull, 'w') subprocess.Popen([sys.argv[0], '-daemon'], stdout=devnull, stderr=devnull) +if __name__ == '__main__' : + main() diff --git a/azurelinuxagent/distro/debian/__init__.py b/azurelinuxagent/common/__init__.py index d9b82f5..1ea2f38 100644 --- a/azurelinuxagent/distro/debian/__init__.py +++ b/azurelinuxagent/common/__init__.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/azurelinuxagent/conf.py b/azurelinuxagent/common/conf.py index 7921e79..1a3b0da 100644 --- a/azurelinuxagent/conf.py +++ b/azurelinuxagent/common/conf.py @@ -21,8 +21,8 @@ Module conf loads and parses configuration file """ import os -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.exception import AgentConfigError +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.exception import AgentConfigError class ConfigurationProvider(object): """ @@ -80,6 +80,9 @@ def load_conf_from_file(conf_file_path, conf=__conf__): raise AgentConfigError(("Failed to load conf file:{0}, {1}" "").format(conf_file_path, err)) +def enable_rdma(conf=__conf__): + return conf.get_switch("OS.EnableRDMA", False) + def get_logs_verbose(conf=__conf__): return conf.get_switch("Logs.Verbose", False) @@ -104,6 +107,9 @@ def get_home_dir(conf=__conf__): def get_passwd_file_path(conf=__conf__): return conf.get("OS.PasswordPath", "/etc/shadow") +def get_sudoers_dir(conf=__conf__): + return conf.get("OS.SudoersDir", "/etc/sudoers.d") + def get_sshd_conf_file_path(conf=__conf__): return conf.get("OS.SshdConfigPath", "/etc/ssh/sshd_config") @@ -164,3 +170,12 @@ def 
get_resourcedisk_filesystem(conf=__conf__): def get_resourcedisk_swap_size_mb(conf=__conf__): return conf.get_int("ResourceDisk.SwapSizeMB", 0) +def get_autoupdate_gafamily(conf=__conf__): + return conf.get("AutoUpdate.GAFamily", "Prod") + +def get_autoupdate_enabled(conf=__conf__): + return conf.get_switch("AutoUpdate.Enabled", True) + +def get_autoupdate_frequency(conf=__conf__): + return conf.get_int("Autoupdate.Frequency", 3600) + diff --git a/azurelinuxagent/distro/default/dhcp.py b/azurelinuxagent/common/dhcp.py index fc439d2..d5c90cb 100644 --- a/azurelinuxagent/distro/default/dhcp.py +++ b/azurelinuxagent/common/dhcp.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -15,32 +13,43 @@ # limitations under the License. # # Requires Python 2.4+ and Openssl 1.0+ + import os import socket import array import time -import threading -import azurelinuxagent.logger as logger -import azurelinuxagent.conf as conf -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -from azurelinuxagent.utils.textutil import hex_dump, hex_dump2, hex_dump3, \ - compare_bytes, str_to_ord, \ - unpack_big_endian, \ - unpack_little_endian, \ - int_to_ip4_addr -from azurelinuxagent.exception import DhcpError +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.utils import fileutil +from azurelinuxagent.common.utils.textutil import hex_dump, hex_dump2, \ + hex_dump3, \ + compare_bytes, str_to_ord, \ + unpack_big_endian, \ + int_to_ip4_addr +from azurelinuxagent.common.exception import DhcpError +from azurelinuxagent.common.osutil import get_osutil + +# the kernel routing table representation of 168.63.129.16 +KNOWN_WIRESERVER_IP_ENTRY = '10813FA8' +KNOWN_WIRESERVER_IP = '168.63.129.16' + + +def get_dhcp_handler(): + return DhcpHandler() class DhcpHandler(object): """ Azure use DHCP option 245 to pass endpoint ip to VMs. """ - def __init__(self, distro): - self.distro = distro + + def __init__(self): + self.osutil = get_osutil() self.endpoint = None self.gateway = None self.routes = None + self._request_broadcast = False + self.skip_cache = False def run(self): """ @@ -48,6 +57,9 @@ class DhcpHandler(object): Configure default gateway and routes Save wire server endpoint if found """ + if self.wireserver_route_exists or self.dhcp_cache_exists: + return + self.send_dhcp_req() self.conf_routes() @@ -55,30 +67,81 @@ class DhcpHandler(object): """ Wait for network stack to be initialized. """ - ipv4 = self.distro.osutil.get_ip4_addr() + ipv4 = self.osutil.get_ip4_addr() while ipv4 == '' or ipv4 == '0.0.0.0': logger.info("Waiting for network.") time.sleep(10) logger.info("Try to start network interface.") - self.distro.osutil.start_network() - ipv4 = self.distro.osutil.get_ip4_addr() + self.osutil.start_network() + ipv4 = self.osutil.get_ip4_addr() + + @property + def wireserver_route_exists(self): + """ + Determine whether a route to the known wireserver + ip already exists, and if so use that as the endpoint. + This is true when running in a virtual network. + :return: True if a route to KNOWN_WIRESERVER_IP exists. 
+ """ + route_exists = False + logger.info("test for route to {0}".format(KNOWN_WIRESERVER_IP)) + try: + route_file = '/proc/net/route' + if os.path.exists(route_file) and \ + KNOWN_WIRESERVER_IP_ENTRY in open(route_file).read(): + # reset self.gateway and self.routes + # we do not need to alter the routing table + self.endpoint = KNOWN_WIRESERVER_IP + self.gateway = None + self.routes = None + route_exists = True + logger.info("route to {0} exists".format(KNOWN_WIRESERVER_IP)) + else: + logger.warn( + "no route exists to {0}".format(KNOWN_WIRESERVER_IP)) + except Exception as e: + logger.error( + "could not determine whether route exists to {0}: {1}".format( + KNOWN_WIRESERVER_IP, e)) + + return route_exists + + @property + def dhcp_cache_exists(self): + """ + Check whether the dhcp options cache exists and contains the + wireserver endpoint, unless skip_cache is True. + :return: True if the cached endpoint was found in the dhcp lease + """ + if self.skip_cache: + return False + + exists = False + + logger.info("checking for dhcp lease cache") + cached_endpoint = self.osutil.get_dhcp_lease_endpoint() + if cached_endpoint is not None: + self.endpoint = cached_endpoint + exists = True + logger.info("cache exists [{0}]".format(exists)) + return exists def conf_routes(self): logger.info("Configure routes") logger.info("Gateway:{0}", self.gateway) logger.info("Routes:{0}", self.routes) - #Add default gateway + # Add default gateway if self.gateway is not None: - self.distro.osutil.route_add(0 , 0, self.gateway) + self.osutil.route_add(0, 0, self.gateway) if self.routes is not None: for route in self.routes: - self.distro.osutil.route_add(route[0], route[1], route[2]) + self.osutil.route_add(route[0], route[1], route[2]) - def _send_dhcp_req(self, request): + def _send_dhcp_req(self, request): __waiting_duration__ = [0, 10, 30, 60, 60] for duration in __waiting_duration__: try: - self.distro.osutil.allow_dhcp_broadcast() + self.osutil.allow_dhcp_broadcast() response = socket_send(request) validate_dhcp_resp(request, response) return response @@ -94,32 +157,37 @@ class DhcpHandler(object): Stop dhcp service if necessary """ logger.info("Send dhcp request") - mac_addr = self.distro.osutil.get_mac_addr() - req = build_dhcp_request(mac_addr) + mac_addr = self.osutil.get_mac_addr() + + # Do unicast first, then fallback to broadcast if fails. + req = build_dhcp_request(mac_addr, self._request_broadcast) + if not self._request_broadcast: + self._request_broadcast = True # Temporary allow broadcast for dhcp. Remove the route when done. - missing_default_route = self.distro.osutil.is_missing_default_route() - ifname = self.distro.osutil.get_if_name() + missing_default_route = self.osutil.is_missing_default_route() + ifname = self.osutil.get_if_name() if missing_default_route: - self.distro.osutil.set_route_for_dhcp_broadcast(ifname) + self.osutil.set_route_for_dhcp_broadcast(ifname) # In some distros, dhcp service needs to be shutdown before agent probe # endpoint through dhcp. 
- if self.distro.osutil.is_dhcp_enabled(): - self.distro.osutil.stop_dhcp_service() + if self.osutil.is_dhcp_enabled(): + self.osutil.stop_dhcp_service() resp = self._send_dhcp_req(req) - - if self.distro.osutil.is_dhcp_enabled(): - self.distro.osutil.start_dhcp_service() + + if self.osutil.is_dhcp_enabled(): + self.osutil.start_dhcp_service() if missing_default_route: - self.distro.osutil.remove_route_for_dhcp_broadcast(ifname) + self.osutil.remove_route_for_dhcp_broadcast(ifname) if resp is None: raise DhcpError("Failed to receive dhcp response.") self.endpoint, self.gateway, self.routes = parse_dhcp_resp(resp) + def validate_dhcp_resp(request, response): bytes_recv = len(response) if bytes_recv < 0xF6: @@ -127,35 +195,37 @@ def validate_dhcp_resp(request, response): bytes_recv) return False - logger.verb("BytesReceived:{0}", hex(bytes_recv)) - logger.verb("DHCP response:{0}", hex_dump(response, bytes_recv)) + logger.verbose("BytesReceived:{0}", hex(bytes_recv)) + logger.verbose("DHCP response:{0}", hex_dump(response, bytes_recv)) # check transactionId, cookie, MAC address cookie should never mismatch # transactionId and MAC address may mismatch if we see a response # meant from another machine if not compare_bytes(request, response, 0xEC, 4): - logger.verb("Cookie not match:\nsend={0},\nreceive={1}", + logger.verbose("Cookie not match:\nsend={0},\nreceive={1}", hex_dump3(request, 0xEC, 4), hex_dump3(response, 0xEC, 4)) raise DhcpError("Cookie in dhcp respones doesn't match the request") if not compare_bytes(request, response, 4, 4): - logger.verb("TransactionID not match:\nsend={0},\nreceive={1}", + logger.verbose("TransactionID not match:\nsend={0},\nreceive={1}", hex_dump3(request, 4, 4), hex_dump3(response, 4, 4)) raise DhcpError("TransactionID in dhcp respones " - "doesn't match the request") + "doesn't match the request") if not compare_bytes(request, response, 0x1C, 6): - logger.verb("Mac Address not match:\nsend={0},\nreceive={1}", + logger.verbose("Mac Address not match:\nsend={0},\nreceive={1}", hex_dump3(request, 0x1C, 6), hex_dump3(response, 0x1C, 6)) raise DhcpError("Mac Addr in dhcp respones " - "doesn't match the request") + "doesn't match the request") + def parse_route(response, option, i, length, bytes_recv): # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx - logger.verb("Routes at offset: {0} with length:{1}", hex(i), hex(length)) + logger.verbose("Routes at offset: {0} with length:{1}", hex(i), + hex(length)) routes = [] if length < 5: logger.error("Data too small for option:{0}", option) @@ -176,6 +246,7 @@ def parse_route(response, option, i, length, bytes_recv): logger.error("Unable to parse routes") return routes + def parse_ip_addr(response, option, i, length, bytes_recv): if i + 5 < bytes_recv: if length != 4: @@ -188,48 +259,51 @@ def parse_ip_addr(response, option, i, length, bytes_recv): logger.error("Data too small for option:{0}", option) return None + def parse_dhcp_resp(response): """ Parse DHCP response: Returns endpoint server or None on error. """ - logger.verb("parse Dhcp Response") + logger.verbose("parse Dhcp Response") bytes_recv = len(response) endpoint = None gateway = None routes = None # Walk all the returned options, parsing out what we need, ignoring the - # others. We need the custom option 245 to find the the endpoint we talk to, - # as well as, to handle some Linux DHCP client incompatibilities, - # options 3 for default gateway and 249 for routes. And 255 is end. + # others. 
We need the custom option 245 to find the the endpoint we talk to + # as well as to handle some Linux DHCP client incompatibilities; + # options 3 for default gateway and 249 for routes; 255 is end. - i = 0xF0 # offset to first option + i = 0xF0 # offset to first option while i < bytes_recv: option = str_to_ord(response[i]) length = 0 if (i + 1) < bytes_recv: length = str_to_ord(response[i + 1]) - logger.verb("DHCP option {0} at offset:{1} with length:{2}", - hex(option), hex(i), hex(length)) + logger.verbose("DHCP option {0} at offset:{1} with length:{2}", + hex(option), hex(i), hex(length)) if option == 255: - logger.verb("DHCP packet ended at offset:{0}", hex(i)) + logger.verbose("DHCP packet ended at offset:{0}", hex(i)) break elif option == 249: routes = parse_route(response, option, i, length, bytes_recv) elif option == 3: gateway = parse_ip_addr(response, option, i, length, bytes_recv) - logger.verb("Default gateway:{0}, at {1}", gateway, hex(i)) + logger.verbose("Default gateway:{0}, at {1}", gateway, hex(i)) elif option == 245: endpoint = parse_ip_addr(response, option, i, length, bytes_recv) - logger.verb("Azure wire protocol endpoint:{0}, at {1}", gateway, - hex(i)) + logger.verbose("Azure wire protocol endpoint:{0}, at {1}", + endpoint, + hex(i)) else: - logger.verb("Skipping DHCP option:{0} at {1} with length {2}", - hex(option), hex(i), hex(length)) + logger.verbose("Skipping DHCP option:{0} at {1} with length {2}", + hex(option), hex(i), hex(length)) i += length + 2 return endpoint, gateway, routes + def socket_send(request): sock = None try: @@ -240,7 +314,7 @@ def socket_send(request): sock.bind(("0.0.0.0", 68)) sock.sendto(request, ("<broadcast>", 67)) sock.settimeout(10) - logger.verb("Send DHCP request: Setting socket.timeout=10, " + logger.verbose("Send DHCP request: Setting socket.timeout=10, " "entering recv") response = sock.recv(1024) return response @@ -250,27 +324,28 @@ def socket_send(request): if sock is not None: sock.close() -def build_dhcp_request(mac_addr): + +def build_dhcp_request(mac_addr, request_broadcast): """ Build DHCP request string. 
""" # # typedef struct _DHCP { - # UINT8 Opcode; /* op: BOOTREQUEST or BOOTREPLY */ - # UINT8 HardwareAddressType; /* htype: ethernet */ - # UINT8 HardwareAddressLength; /* hlen: 6 (48 bit mac address) */ - # UINT8 Hops; /* hops: 0 */ - # UINT8 TransactionID[4]; /* xid: random */ - # UINT8 Seconds[2]; /* secs: 0 */ - # UINT8 Flags[2]; /* flags: 0 or 0x8000 for broadcast */ - # UINT8 ClientIpAddress[4]; /* ciaddr: 0 */ - # UINT8 YourIpAddress[4]; /* yiaddr: 0 */ - # UINT8 ServerIpAddress[4]; /* siaddr: 0 */ - # UINT8 RelayAgentIpAddress[4]; /* giaddr: 0 */ - # UINT8 ClientHardwareAddress[16]; /* chaddr: 6 byte eth MAC address */ - # UINT8 ServerName[64]; /* sname: 0 */ - # UINT8 BootFileName[128]; /* file: 0 */ - # UINT8 MagicCookie[4]; /* 99 130 83 99 */ + # UINT8 Opcode; /* op: BOOTREQUEST or BOOTREPLY */ + # UINT8 HardwareAddressType; /* htype: ethernet */ + # UINT8 HardwareAddressLength; /* hlen: 6 (48 bit mac address) */ + # UINT8 Hops; /* hops: 0 */ + # UINT8 TransactionID[4]; /* xid: random */ + # UINT8 Seconds[2]; /* secs: 0 */ + # UINT8 Flags[2]; /* flags: 0 or 0x8000 for broadcast*/ + # UINT8 ClientIpAddress[4]; /* ciaddr: 0 */ + # UINT8 YourIpAddress[4]; /* yiaddr: 0 */ + # UINT8 ServerIpAddress[4]; /* siaddr: 0 */ + # UINT8 RelayAgentIpAddress[4]; /* giaddr: 0 */ + # UINT8 ClientHardwareAddress[16]; /* chaddr: 6 byte eth MAC address */ + # UINT8 ServerName[64]; /* sname: 0 */ + # UINT8 BootFileName[128]; /* file: 0 */ + # UINT8 MagicCookie[4]; /* 99 130 83 99 */ # /* 0x63 0x82 0x53 0x63 */ # /* options -- hard code ours */ # @@ -297,9 +372,15 @@ def build_dhcp_request(mac_addr): for a in range(0, 4): request[4 + a] = str_to_ord(trans_id[a]) - logger.verb("BuildDhcpRequest: transactionId:%s,%04X" % ( - hex_dump2(trans_id), - unpack_big_endian(request, 4, 4))) + logger.verbose("BuildDhcpRequest: transactionId:%s,%04X" % ( + hex_dump2(trans_id), + unpack_big_endian(request, 4, 4))) + + if request_broadcast: + # set broadcast flag to true to request the dhcp sever + # to respond to a boradcast address, + # this is useful when user dhclient fails. 
+ request[0x0A] = 0x80; # fill in ClientHardwareAddress for a in range(0, 6): @@ -314,5 +395,6 @@ def build_dhcp_request(mac_addr): request[0xEC + a] = [99, 130, 83, 99, 53, 1, 1, 255][a] return array.array("B", request) + def gen_trans_id(): return os.urandom(4) diff --git a/azurelinuxagent/event.py b/azurelinuxagent/common/event.py index f38b242..374b0e7 100644 --- a/azurelinuxagent/event.py +++ b/azurelinuxagent/common/event.py @@ -24,14 +24,14 @@ import time import datetime import threading import platform -import azurelinuxagent.logger as logger -from azurelinuxagent.exception import EventError, ProtocolError -from azurelinuxagent.future import ustr -from azurelinuxagent.protocol.restapi import TelemetryEventParam, \ +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import EventError, ProtocolError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \ TelemetryEventList, \ TelemetryEvent, \ set_properties, get_properties -from azurelinuxagent.metadata import DISTRO_NAME, DISTRO_VERSION, \ +from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_CODE_NAME, AGENT_VERSION @@ -71,11 +71,11 @@ class EventLogger(object): except IOError as e: raise EventError("Failed to write events to file:{0}", e) - def add_event(self, name, op="", is_success=True, duration=0, version="1.0", + def add_event(self, name, op="", is_success=True, duration=0, version=AGENT_VERSION, message="", evt_type="", is_internal=False): event = TelemetryEvent(1, "69B669B9-4AF8-4C50-BDC4-6006FA76E975") event.parameters.append(TelemetryEventParam('Name', name)) - event.parameters.append(TelemetryEventParam('Version', version)) + event.parameters.append(TelemetryEventParam('Version', str(version))) event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) event.parameters.append(TelemetryEventParam('Operation', op)) event.parameters.append(TelemetryEventParam('OperationSuccess', @@ -92,7 +92,7 @@ class EventLogger(object): __event_logger__ = EventLogger() -def add_event(name, op="", is_success=True, duration=0, version="1.0", +def add_event(name, op="", is_success=True, duration=0, version=AGENT_VERSION, message="", evt_type="", is_internal=False, reporter=__event_logger__): log = logger.info if is_success else logger.error @@ -102,7 +102,7 @@ def add_event(name, op="", is_success=True, duration=0, version="1.0", logger.warn("Event reporter is not initialized.") return reporter.add_event(name, op=op, is_success=is_success, duration=duration, - version=version, message=message, evt_type=evt_type, + version=str(version), message=message, evt_type=evt_type, is_internal=is_internal) def init_event_logger(event_dir, reporter=__event_logger__): diff --git a/azurelinuxagent/exception.py b/azurelinuxagent/common/exception.py index 7fa5cff..457490c 100644 --- a/azurelinuxagent/exception.py +++ b/azurelinuxagent/common/exception.py @@ -113,3 +113,11 @@ class CryptError(AgentError): """ def __init__(self, msg=None, inner=None): super(CryptError, self).__init__('000011', msg, inner) + +class UpdateError(AgentError): + """ + Update Guest Agent error + """ + def __init__(self, msg=None, inner=None): + super(UpdateError, self).__init__('000012', msg, inner) + diff --git a/azurelinuxagent/future.py b/azurelinuxagent/common/future.py index 8509732..8509732 100644 --- a/azurelinuxagent/future.py +++ b/azurelinuxagent/common/future.py diff --git a/azurelinuxagent/logger.py 
b/azurelinuxagent/common/logger.py index 6c6b406..c1eb18f 100644 --- a/azurelinuxagent/logger.py +++ b/azurelinuxagent/common/logger.py @@ -14,15 +14,12 @@ # # Requires Python 2.4+ and openssl_bin 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx """ Log utils """ import os import sys -from azurelinuxagent.future import ustr +from azurelinuxagent.common.future import ustr from datetime import datetime class Logger(object): @@ -35,7 +32,7 @@ class Logger(object): self.appenders.extend(logger.appenders) self.prefix = prefix - def verb(self, msg_format, *args): + def verbose(self, msg_format, *args): self.log(LogLevel.VERBOSE, msg_format, *args) def info(self, msg_format, *args): @@ -74,9 +71,7 @@ class Logger(object): class ConsoleAppender(object): def __init__(self, level, path): - self.level = LogLevel.INFO - if level >= LogLevel.INFO: - self.level = level + self.level = level self.path = path def write(self, level, msg): @@ -134,8 +129,8 @@ class AppenderType(object): def add_logger_appender(appender_type, level=LogLevel.INFO, path=None): DEFAULT_LOGGER.add_appender(appender_type, level, path) -def verb(msg_format, *args): - DEFAULT_LOGGER.verb(msg_format, *args) +def verbose(msg_format, *args): + DEFAULT_LOGGER.verbose(msg_format, *args) def info(msg_format, *args): DEFAULT_LOGGER.info(msg_format, *args) diff --git a/azurelinuxagent/common/osutil/__init__.py b/azurelinuxagent/common/osutil/__init__.py new file mode 100644 index 0000000..3b5ba3b --- /dev/null +++ b/azurelinuxagent/common/osutil/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.common.osutil.factory import get_osutil diff --git a/azurelinuxagent/distro/coreos/osutil.py b/azurelinuxagent/common/osutil/coreos.py index ffc83e3..e26fd97 100644 --- a/azurelinuxagent/distro/coreos/osutil.py +++ b/azurelinuxagent/common/osutil/coreos.py @@ -26,11 +26,11 @@ import struct import fcntl import time import base64 -import azurelinuxagent.logger as logger -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.textutil as textutil -from azurelinuxagent.distro.default.osutil import DefaultOSUtil +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.osutil.default import DefaultOSUtil class CoreOSUtil(DefaultOSUtil): def __init__(self): @@ -67,7 +67,8 @@ class CoreOSUtil(DefaultOSUtil): shellutil.run("systemctl restart systemd-networkd") def restart_ssh_service(self): - return shellutil.run("systemctl restart sshd", chk_err=False) + # SSH is socket activated on CoreOS. No need to restart it. 
+ pass def stop_dhcp_service(self): return shellutil.run("systemctl stop systemd-networkd", chk_err=False) @@ -85,10 +86,6 @@ class CoreOSUtil(DefaultOSUtil): ret= shellutil.run_get_output("pidof systemd-networkd") return ret[1] if ret[0] == 0 else None - def set_ssh_client_alive_interval(self): - #In CoreOS, /etc/sshd_config is mount readonly. Skip the setting - pass - def conf_sshd(self, disable_password): #In CoreOS, /etc/sshd_config is mount readonly. Skip the setting pass diff --git a/azurelinuxagent/distro/debian/osutil.py b/azurelinuxagent/common/osutil/debian.py index a40c1de..f455572 100644 --- a/azurelinuxagent/distro/debian/osutil.py +++ b/azurelinuxagent/common/osutil/debian.py @@ -26,11 +26,11 @@ import struct import fcntl import time import base64 -import azurelinuxagent.logger as logger -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.textutil as textutil -from azurelinuxagent.distro.default.osutil import DefaultOSUtil +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.osutil.default import DefaultOSUtil class DebianOSUtil(DefaultOSUtil): def __init__(self): diff --git a/azurelinuxagent/distro/default/osutil.py b/azurelinuxagent/common/osutil/default.py index 18ab2ba..c243c85 100644 --- a/azurelinuxagent/distro/default/osutil.py +++ b/azurelinuxagent/common/osutil/default.py @@ -26,14 +26,16 @@ import time import pwd import fcntl import base64 -import azurelinuxagent.logger as logger -import azurelinuxagent.conf as conf -from azurelinuxagent.exception import OSUtilError -from azurelinuxagent.future import ustr -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.textutil as textutil -from azurelinuxagent.utils.cryptutil import CryptUtil +import glob +import datetime +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.exception import OSUtilError +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.utils.cryptutil import CryptUtil __RULES_FILES__ = [ "/lib/udev/rules.d/75-persistent-net-generator.rules", "/etc/udev/rules.d/70-persistent-net.rules" ] @@ -71,7 +73,7 @@ class DefaultOSUtil(object): userentry = self.get_userentry(username) uidmin = None try: - uidmin_def = fileutil.get_line_startingwith("UID_MIN", + uidmin_def = fileutil.get_line_startingwith("UID_MIN", "/etc/login.defs") if uidmin_def is not None: uidmin = int(uidmin_def.split()[1]) @@ -114,21 +116,36 @@ class DefaultOSUtil(object): raise OSUtilError(("Failed to set password for {0}: {1}" "").format(username, output)) - def conf_sudoer(self, username, nopasswd): - # for older distros create sudoers.d - if not os.path.isdir('/etc/sudoers.d/'): - # create the /etc/sudoers.d/ directory - os.mkdir('/etc/sudoers.d/') - # add the include of sudoers.d to the /etc/sudoers - sudoers = '\n' + '#includedir /etc/sudoers.d/\n' - fileutil.append_file('/etc/sudoers', sudoers) - sudoer = None - if nopasswd: - sudoer = "{0} ALL = (ALL) NOPASSWD\n".format(username) + def conf_sudoer(self, username, 
nopasswd=False, remove=False): + sudoers_dir = conf.get_sudoers_dir() + sudoers_wagent = os.path.join(sudoers_dir, 'waagent') + + if not remove: + # for older distros create sudoers.d + if not os.path.isdir(sudoers_dir): + sudoers_file = os.path.join(sudoers_dir, '../sudoers') + # create the sudoers.d directory + os.mkdir(sudoers_dir) + # add the include of sudoers.d to the /etc/sudoers + sudoers = '\n#includedir ' + sudoers_dir + '\n' + fileutil.append_file(sudoers_file, sudoers) + sudoer = None + if nopasswd: + sudoer = "{0} ALL=(ALL) NOPASSWD: ALL\n".format(username) + else: + sudoer = "{0} ALL=(ALL) ALL\n".format(username) + fileutil.append_file(sudoers_wagent, sudoer) + fileutil.chmod(sudoers_wagent, 0o440) else: - sudoer = "{0} ALL = (ALL) ALL\n".format(username) - fileutil.append_file('/etc/sudoers.d/waagent', sudoer) - fileutil.chmod('/etc/sudoers.d/waagent', 0o440) + #Remove user from sudoers + if os.path.isfile(sudoers_wagent): + try: + content = fileutil.read_file(sudoers_wagent) + sudoers = content.split("\n") + sudoers = [x for x in sudoers if username not in x] + fileutil.write_file(sudoers_wagent, "\n".join(sudoers)) + except IOError as e: + raise OSUtilError("Failed to remove sudoer: {0}".format(e)) def del_root_password(self): try: @@ -179,7 +196,7 @@ class DefaultOSUtil(object): """ path, thumbprint, value = pubkey if path is None: - raise OSUtilError("Publich key path is None") + raise OSUtilError("Public key path is None") crytputil = CryptUtil(conf.get_openssl_cmd()) @@ -198,7 +215,7 @@ class DefaultOSUtil(object): pub_path = os.path.join(lib_dir, thumbprint + '.pub') pub = crytputil.get_pubkey_from_crt(crt_path) fileutil.write_file(pub_path, pub) - self.set_selinux_context(pub_path, + self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0') self.openssl_to_openssh(pub_path, path) fileutil.chmod(pub_path, 0o600) @@ -246,46 +263,46 @@ class DefaultOSUtil(object): Returns exit result. """ if self.is_selinux_system(): + if not os.path.exists(path): + logger.error("Path does not exist: {0}".format(path)) + return 1 return shellutil.run('chcon ' + con + ' ' + path) - def set_ssh_client_alive_interval(self): - conf_file_path = conf.get_sshd_conf_file_path() - conf_file = fileutil.read_file(conf_file_path).split("\n") - textutil.set_ssh_config(conf_file, "ClientAliveInterval", "180") - fileutil.write_file(conf_file_path, '\n'.join(conf_file)) - logger.info("Configured SSH client probing to keep connections alive.") - def conf_sshd(self, disable_password): option = "no" if disable_password else "yes" conf_file_path = conf.get_sshd_conf_file_path() conf_file = fileutil.read_file(conf_file_path).split("\n") textutil.set_ssh_config(conf_file, "PasswordAuthentication", option) - textutil.set_ssh_config(conf_file, "ChallengeResponseAuthentication", - option) + textutil.set_ssh_config(conf_file, "ChallengeResponseAuthentication", option) + textutil.set_ssh_config(conf_file, "ClientAliveInterval", "180") fileutil.write_file(conf_file_path, "\n".join(conf_file)) - logger.info("Disabled SSH password-based authentication methods.") + logger.info("{0} SSH password-based authentication methods." 
+ .format("Disabled" if disable_password else "Enabled")) + logger.info("Configured SSH client probing to keep connections alive.") def get_dvd_device(self, dev_dir='/dev'): - patten=r'(sr[0-9]|hd[c-z]|cdrom[0-9])' - for dvd in [re.match(patten, dev) for dev in os.listdir(dev_dir)]: + pattern=r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9])' + for dvd in [re.match(pattern, dev) for dev in os.listdir(dev_dir)]: if dvd is not None: return "/dev/{0}".format(dvd.group(0)) raise OSUtilError("Failed to get dvd device") - def mount_dvd(self, max_retry=6, chk_err=True): - dvd = self.get_dvd_device() - mount_point = conf.get_dvd_mount_point() + def mount_dvd(self, max_retry=6, chk_err=True, dvd_device=None, mount_point=None): + if dvd_device is None: + dvd_device = self.get_dvd_device() + if mount_point is None: + mount_point = conf.get_dvd_mount_point() mountlist = shellutil.run_get_output("mount")[1] - existing = self.get_mount_point(mountlist, dvd) + existing = self.get_mount_point(mountlist, dvd_device) if existing is not None: #Already mounted - logger.info("{0} is already mounted at {1}", dvd, existing) + logger.info("{0} is already mounted at {1}", dvd_device, existing) return if not os.path.isdir(mount_point): os.makedirs(mount_point) for retry in range(0, max_retry): - retcode = self.mount(dvd, mount_point, option="-o ro -t iso9660,udf", + retcode = self.mount(dvd_device, mount_point, option="-o ro -t udf,iso9660", chk_err=chk_err) if retcode == 0: logger.info("Successfully mounted dvd") @@ -297,19 +314,26 @@ class DefaultOSUtil(object): if chk_err: raise OSUtilError("Failed to mount dvd.") - def umount_dvd(self, chk_err=True): - mount_point = conf.get_dvd_mount_point() + def umount_dvd(self, chk_err=True, mount_point=None): + if mount_point is None: + mount_point = conf.get_dvd_mount_point() retcode = self.umount(mount_point, chk_err=chk_err) if chk_err and retcode != 0: raise OSUtilError("Failed to umount dvd.") - + def eject_dvd(self, chk_err=True): dvd = self.get_dvd_device() retcode = shellutil.run("eject {0}".format(dvd)) if chk_err and retcode != 0: raise OSUtilError("Failed to eject dvd: ret={0}".format(retcode)) - def load_atappix_mod(self): + def try_load_atapiix_mod(self): + try: + self.load_atapiix_mod() + except Exception as e: + logger.warn("Could not load ATAPI driver: {0}".format(e)) + + def load_atapiix_mod(self): if self.is_atapiix_mod_loaded(): return ret, kern_version = shellutil.run_get_output("uname -r") @@ -338,7 +362,7 @@ class DefaultOSUtil(object): return False def mount(self, dvd, mount_point, option="", chk_err=True): - cmd = "mount {0} {1} {2}".format(dvd, option, mount_point) + cmd = "mount {0} {1} {2}".format(option, dvd, mount_point) return shellutil.run_get_output(cmd, chk_err)[0] def umount(self, mount_point, chk_err=True): @@ -378,7 +402,7 @@ class DefaultOSUtil(object): def get_mac_addr(self): """ Convienience function, returns mac addr bound to - first non-loobback interface. + first non-loopback interface. 
""" ifname='' while len(ifname) < 2 : @@ -418,14 +442,156 @@ class DefaultOSUtil(object): logger.warn(('SIOCGIFCONF returned more than {0} up ' 'network interfaces.'), expected) sock = buff.tostring() + primary = bytearray(self.get_primary_interface(), encoding='utf-8') for i in range(0, struct_size * expected, struct_size): iface=sock[i:i+16].split(b'\0', 1)[0] - if iface == b'lo': + if len(iface) == 0 or self.is_loopback(iface) or iface != primary: + # test the next one + logger.info('interface [{0}] skipped'.format(iface)) continue else: + # use this one + logger.info('interface [{0}] selected'.format(iface)) break + return iface.decode('latin-1'), socket.inet_ntoa(sock[i+20:i+24]) + def get_primary_interface(self): + """ + Get the name of the primary interface, which is the one with the + default route attached to it; if there are multiple default routes, + the primary has the lowest Metric. + :return: the interface which has the default route + """ + # from linux/route.h + RTF_GATEWAY = 0x02 + DEFAULT_DEST = "00000000" + + hdr_iface = "Iface" + hdr_dest = "Destination" + hdr_flags = "Flags" + hdr_metric = "Metric" + + idx_iface = -1 + idx_dest = -1 + idx_flags = -1 + idx_metric = -1 + primary = None + primary_metric = None + + logger.info("examine /proc/net/route for primary interface") + with open('/proc/net/route') as routing_table: + idx = 0 + for header in filter(lambda h: len(h) > 0, routing_table.readline().strip(" \n").split("\t")): + if header == hdr_iface: + idx_iface = idx + elif header == hdr_dest: + idx_dest = idx + elif header == hdr_flags: + idx_flags = idx + elif header == hdr_metric: + idx_metric = idx + idx = idx + 1 + for entry in routing_table.readlines(): + route = entry.strip(" \n").split("\t") + if route[idx_dest] == DEFAULT_DEST and int(route[idx_flags]) & RTF_GATEWAY == RTF_GATEWAY: + metric = int(route[idx_metric]) + iface = route[idx_iface] + if primary is None or metric < primary_metric: + primary = iface + primary_metric = metric + + if primary is None: + primary = '' + + logger.info('primary interface is [{0}]'.format(primary)) + return primary + + + def is_primary_interface(self, ifname): + """ + Indicate whether the specified interface is the primary. + :param ifname: the name of the interface - eth0, lo, etc. + :return: True if this interface binds the default route + """ + return self.get_primary_interface() == ifname + + + def is_loopback(self, ifname): + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) + result = fcntl.ioctl(s.fileno(), 0x8913, struct.pack('256s', ifname[:15])) + flags, = struct.unpack('H', result[16:18]) + isloopback = flags & 8 == 8 + logger.info('interface [{0}] has flags [{1}], is loopback [{2}]'.format(ifname, flags, isloopback)) + return isloopback + + def get_dhcp_lease_endpoint(self): + """ + OS specific, this should return the decoded endpoint of + the wireserver from option 245 in the dhcp leases file + if it exists on disk. + :return: The endpoint if available, or None + """ + return None + + @staticmethod + def get_endpoint_from_leases_path(pathglob): + """ + Try to discover and decode the wireserver endpoint in the + specified dhcp leases path. 
+ :param pathglob: The path containing dhcp lease files + :return: The endpoint if available, otherwise None + """ + endpoint = None + + HEADER_LEASE = "lease" + HEADER_OPTION = "option unknown-245" + HEADER_DNS = "option domain-name-servers" + HEADER_EXPIRE = "expire" + FOOTER_LEASE = "}" + FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S" + + logger.info("looking for leases in path [{0}]".format(pathglob)) + for lease_file in glob.glob(pathglob): + leases = open(lease_file).read() + if HEADER_OPTION in leases: + cached_endpoint = None + has_option_245 = False + expired = True # assume expired + for line in leases.splitlines(): + if line.startswith(HEADER_LEASE): + cached_endpoint = None + has_option_245 = False + expired = True + elif HEADER_DNS in line: + cached_endpoint = line.replace(HEADER_DNS, '').strip(" ;") + elif HEADER_OPTION in line: + has_option_245 = True + elif HEADER_EXPIRE in line: + if "never" in line: + expired = False + else: + try: + expire_string = line.split(" ", 4)[-1].strip(";") + expire_date = datetime.datetime.strptime(expire_string, FORMAT_DATETIME) + if expire_date > datetime.datetime.utcnow(): + expired = False + except: + logger.error("could not parse expiry token '{0}'".format(line)) + elif FOOTER_LEASE in line: + logger.info("dhcp entry:{0}, 245:{1}, expired:{2}".format( + cached_endpoint, has_option_245, expired)) + if not expired and cached_endpoint is not None and has_option_245: + endpoint = cached_endpoint + logger.info("found endpoint [{0}]".format(endpoint)) + # we want to return the last valid entry, so + # keep searching + if endpoint is not None: + logger.info("cached endpoint found [{0}]".format(endpoint)) + else: + logger.info("cached endpoint not found") + return endpoint + def is_missing_default_route(self): routes = shellutil.run_get_output("route -n")[1] for route in routes.split("\n"): @@ -492,7 +658,7 @@ class DefaultOSUtil(object): def set_dhcp_hostname(self, hostname): autosend = r'^[^#]*?send\s*host-name.*?(<hostname>|gethostname[(,)])' - dhclient_files = ['/etc/dhcp/dhclient.conf', '/etc/dhcp3/dhclient.conf'] + dhclient_files = ['/etc/dhcp/dhclient.conf', '/etc/dhcp3/dhclient.conf', '/etc/dhclient.conf'] for conf_file in dhclient_files: if not os.path.isfile(conf_file): continue @@ -501,10 +667,20 @@ class DefaultOSUtil(object): return fileutil.update_conf_file(conf_file, 'send host-name', - 'send host-name {0}'.format(hostname)) + 'send host-name "{0}";'.format(hostname)) - def restart_if(self, ifname): - shellutil.run("ifdown {0} && ifup {1}".format(ifname, ifname)) + def restart_if(self, ifname, retries=3, wait=5): + retry_limit=retries+1 + for attempt in range(1, retry_limit): + return_code=shellutil.run("ifdown {0} && ifup {0}".format(ifname)) + if return_code == 0: + return + logger.warn("failed to restart {0}: return code {1}".format(ifname, return_code)) + if attempt < retry_limit: + logger.info("retrying in {0} seconds".format(wait)) + time.sleep(wait) + else: + logger.warn("exceeded restart retries") def publish_hostname(self, hostname): self.set_dhcp_hostname(hostname) @@ -579,16 +755,7 @@ class DefaultOSUtil(object): logger.error("{0} is a system user. 
Will not delete it.", username) shellutil.run("> /var/run/utmp") shellutil.run("userdel -f -r " + username) - #Remove user from suders - if os.path.isfile("/etc/suders.d/waagent"): - try: - content = fileutil.read_file("/etc/sudoers.d/waagent") - sudoers = content.split("\n") - sudoers = [x for x in sudoers if username not in x] - fileutil.write_file("/etc/sudoers.d/waagent", - "\n".join(sudoers)) - except IOError as e: - raise OSUtilError("Failed to remove sudoer: {0}".format(e)) + self.conf_sudoer(username, remove=True) def decode_customdata(self, data): return base64.b64decode(data) @@ -606,8 +773,8 @@ class DefaultOSUtil(object): if ret[0] == 0: return int(ret[1]) else: - raise OSUtilError("Failed to get procerssor cores") - + raise OSUtilError("Failed to get processor cores") + def set_admin_access_to_ip(self, dest_ip): #This allows root to access dest_ip rm_old= "iptables -D OUTPUT -d {0} -j ACCEPT -m owner --uid-owner 0" @@ -621,3 +788,5 @@ class DefaultOSUtil(object): shellutil.run(rm_old.format(dest_ip), chk_err=False) shellutil.run(rule.format(dest_ip)) + def check_pid_alive(self, pid): + return pid is not None and os.path.isdir(os.path.join('/proc', pid)) diff --git a/azurelinuxagent/distro/loader.py b/azurelinuxagent/common/osutil/factory.py index 74ea9e7..5e8ae6e 100644 --- a/azurelinuxagent/distro/loader.py +++ b/azurelinuxagent/common/osutil/factory.py @@ -15,53 +15,55 @@ # Requires Python 2.4+ and Openssl 1.0+ # -import azurelinuxagent.logger as logger -from azurelinuxagent.utils.textutil import Version -from azurelinuxagent.metadata import DISTRO_NAME, DISTRO_VERSION, \ +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.utils.textutil import Version +from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ DISTRO_FULL_NAME -from azurelinuxagent.distro.default.distro import DefaultDistro -from azurelinuxagent.distro.ubuntu.distro import UbuntuDistro, \ - Ubuntu14Distro, \ - Ubuntu12Distro, \ - UbuntuSnappyDistro -from azurelinuxagent.distro.redhat.distro import RedhatDistro, Redhat6xDistro -from azurelinuxagent.distro.coreos.distro import CoreOSDistro -from azurelinuxagent.distro.suse.distro import SUSE11Distro, SUSEDistro -from azurelinuxagent.distro.debian.distro import DebianDistro -def get_distro(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, +from .default import DefaultOSUtil +from .coreos import CoreOSUtil +from .debian import DebianOSUtil +from .freebsd import FreeBSDOSUtil +from .redhat import RedhatOSUtil, Redhat6xOSUtil +from .suse import SUSEOSUtil, SUSE11OSUtil +from .ubuntu import UbuntuOSUtil, Ubuntu12OSUtil, Ubuntu14OSUtil, \ + UbuntuSnappyOSUtil + +def get_osutil(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, distro_full_name=DISTRO_FULL_NAME): if distro_name == "ubuntu": if Version(distro_version) == Version("12.04") or \ Version(distro_version) == Version("12.10"): - return Ubuntu12Distro() + return Ubuntu12OSUtil() elif Version(distro_version) == Version("14.04") or \ Version(distro_version) == Version("14.10"): - return Ubuntu14Distro() + return Ubuntu14OSUtil() elif distro_full_name == "Snappy Ubuntu Core": - return UbuntuSnappyDistro() + return UbuntuSnappyOSUtil() else: - return UbuntuDistro() + return UbuntuOSUtil() if distro_name == "coreos": - return CoreOSDistro() + return CoreOSUtil() if distro_name == "suse": if distro_full_name=='SUSE Linux Enterprise Server' and \ Version(distro_version) < Version('12') or \ distro_full_name == 'openSUSE' and \ Version(distro_version) < 
Version('13.2'): - return SUSE11Distro() + return SUSE11OSUtil() else: - return SUSEDistro() + return SUSEOSUtil() elif distro_name == "debian": - return DebianDistro() + return DebianOSUtil() elif distro_name == "redhat" or distro_name == "centos" or \ distro_name == "oracle": if Version(distro_version) < Version("7"): - return Redhat6xDistro() + return Redhat6xOSUtil() else: - return RedhatDistro() + return RedhatOSUtil() + elif distro_name == "freebsd": + return FreeBSDOSUtil() else: logger.warn("Unable to load distro implemetation for {0}.", distro_name) logger.warn("Use default distro implemetation instead.") - return DefaultDistro() + return DefaultOSUtil() diff --git a/azurelinuxagent/common/osutil/freebsd.py b/azurelinuxagent/common/osutil/freebsd.py new file mode 100644 index 0000000..ddf8db6 --- /dev/null +++ b/azurelinuxagent/common/osutil/freebsd.py @@ -0,0 +1,198 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ + +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import OSUtilError +from azurelinuxagent.common.osutil.default import DefaultOSUtil + + +class FreeBSDOSUtil(DefaultOSUtil): + def __init__(self): + super(FreeBSDOSUtil, self).__init__() + self._scsi_disks_timeout_set = False + + def set_hostname(self, hostname): + rc_file_path = '/etc/rc.conf' + conf_file = fileutil.read_file(rc_file_path).split("\n") + textutil.set_ini_config(conf_file, "hostname", hostname) + fileutil.write_file(rc_file_path, "\n".join(conf_file)) + shellutil.run("hostname {0}".format(hostname), chk_err=False) + + def restart_ssh_service(self): + return shellutil.run('service sshd restart', chk_err=False) + + def useradd(self, username, expiration=None): + """ + Create user account with 'username' + """ + userentry = self.get_userentry(username) + if userentry is not None: + logger.warn("User {0} already exists, skip useradd", username) + return + + if expiration is not None: + cmd = "pw useradd {0} -e {1} -m".format(username, expiration) + else: + cmd = "pw useradd {0} -m".format(username) + retcode, out = shellutil.run_get_output(cmd) + if retcode != 0: + raise OSUtilError(("Failed to create user account:{0}, " + "retcode:{1}, " + "output:{2}").format(username, retcode, out)) + + def del_account(self, username): + if self.is_sys_user(username): + logger.error("{0} is a system user. Will not delete it.", username) + shellutil.run('> /var/run/utx.active') + shellutil.run('rmuser -y ' + username) + self.conf_sudoer(username, remove=True) + + def chpasswd(self, username, password, crypt_id=6, salt_len=10): + if self.is_sys_user(username): + raise OSUtilError(("User {0} is a system user. 
" + "Will not set passwd.").format(username)) + passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) + cmd = "echo '{0}'|pw usermod {1} -H 0 ".format(passwd_hash, username) + ret, output = shellutil.run_get_output(cmd, log_cmd=False) + if ret != 0: + raise OSUtilError(("Failed to set password for {0}: {1}" + "").format(username, output)) + + def del_root_password(self): + err = shellutil.run('pw mod user root -w no') + if err: + raise OSUtilError("Failed to delete root password: Failed to update password database.") + + def get_if_mac(self, ifname): + data = self._get_net_info() + if data[0] == ifname: + return data[2].replace(':', '').upper() + return None + + def get_first_if(self): + return self._get_net_info()[:2] + + def route_add(self, net, mask, gateway): + cmd = 'route add {0} {1} {2}'.format(net, gateway, mask) + return shellutil.run(cmd, chk_err=False) + + def is_missing_default_route(self): + """ + For FreeBSD, the default broadcast goes to current default gw, not a all-ones broadcast address, need to + specify the route manually to get it work in a VNET environment. + SEE ALSO: man ip(4) IP_ONESBCAST, + """ + return True + + def is_dhcp_enabled(self): + return True + + def start_dhcp_service(self): + shellutil.run("/etc/rc.d/dhclient start {0}".format(self.get_if_name()), chk_err=False) + + def allow_dhcp_broadcast(self): + pass + + def set_route_for_dhcp_broadcast(self, ifname): + return shellutil.run("route add 255.255.255.255 -iface {0}".format(ifname), chk_err=False) + + def remove_route_for_dhcp_broadcast(self, ifname): + shellutil.run("route delete 255.255.255.255 -iface {0}".format(ifname), chk_err=False) + + def get_dhcp_pid(self): + ret = shellutil.run_get_output("pgrep -n dhclient") + return ret[1] if ret[0] == 0 else None + + def eject_dvd(self, chk_err=True): + dvd = self.get_dvd_device() + retcode = shellutil.run("cdcontrol -f {0} eject".format(dvd)) + if chk_err and retcode != 0: + raise OSUtilError("Failed to eject dvd: ret={0}".format(retcode)) + + def restart_if(self, ifname): + # Restart dhclient only to publish hostname + shellutil.run("/etc/rc.d/dhclient restart {0}".format(ifname), chk_err=False) + + def get_total_mem(self): + cmd = "sysctl hw.physmem |awk '{print $2}'" + ret, output = shellutil.run_get_output(cmd) + if ret: + raise OSUtilError("Failed to get total memory: {0}".format(output)) + try: + return int(output)/1024/1024 + except ValueError: + raise OSUtilError("Failed to get total memory: {0}".format(output)) + + def get_processor_cores(self): + ret, output = shellutil.run_get_output("sysctl hw.ncpu |awk '{print $2}'") + if ret: + raise OSUtilError("Failed to get processor cores.") + + try: + return int(output) + except ValueError: + raise OSUtilError("Failed to get total memory: {0}".format(output)) + + def set_scsi_disks_timeout(self, timeout): + if self._scsi_disks_timeout_set: + return + + ret, output = shellutil.run_get_output('sysctl kern.cam.da.default_timeout={0}'.format(timeout)) + if ret: + raise OSUtilError("Failed set SCSI disks timeout: {0}".format(output)) + self._scsi_disks_timeout_set = True + + def check_pid_alive(self, pid): + return shellutil.run('ps -p {0}'.format(pid), chk_err=False) == 0 + + @staticmethod + def _get_net_info(): + """ + There is no SIOCGIFCONF + on freeBSD - just parse ifconfig. + Returns strings: iface, inet4_addr, and mac + or 'None,None,None' if unable to parse. + We will sleep and retry as the network must be up. 
+ """ + iface = '' + inet = '' + mac = '' + + err, output = shellutil.run_get_output('ifconfig -l ether', chk_err=False) + if err: + raise OSUtilError("Can't find ether interface:{0}".format(output)) + ifaces = output.split() + if not ifaces: + raise OSUtilError("Can't find ether interface.") + iface = ifaces[0] + + err, output = shellutil.run_get_output('ifconfig ' + iface, chk_err=False) + if err: + raise OSUtilError("Can't get info for interface:{0}".format(iface)) + + for line in output.split('\n'): + if line.find('inet ') != -1: + inet = line.split()[1] + elif line.find('ether ') != -1: + mac = line.split()[1] + logger.verbose("Interface info: ({0},{1},{2})", iface, inet, mac) + + return iface, inet, mac diff --git a/azurelinuxagent/distro/redhat/osutil.py b/azurelinuxagent/common/osutil/redhat.py index 7f769a5..03084b6 100644 --- a/azurelinuxagent/distro/redhat/osutil.py +++ b/azurelinuxagent/common/osutil/redhat.py @@ -26,15 +26,15 @@ import struct import fcntl import time import base64 -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -from azurelinuxagent.future import ustr, bytebuffer -from azurelinuxagent.exception import OSUtilError, CryptError -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.textutil as textutil -from azurelinuxagent.utils.cryptutil import CryptUtil -from azurelinuxagent.distro.default.osutil import DefaultOSUtil +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.future import ustr, bytebuffer +from azurelinuxagent.common.exception import OSUtilError, CryptError +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.utils.cryptutil import CryptUtil +from azurelinuxagent.common.osutil.default import DefaultOSUtil class Redhat6xOSUtil(DefaultOSUtil): def __init__(self): @@ -87,6 +87,9 @@ class Redhat6xOSUtil(DefaultOSUtil): fileutil.update_conf_file(filepath, 'DHCP_HOSTNAME', 'DHCP_HOSTNAME={0}'.format(hostname)) + def get_dhcp_lease_endpoint(self): + return self.get_endpoint_from_leases_path('/var/lib/dhclient/dhclient-*.leases') + class RedhatOSUtil(Redhat6xOSUtil): def __init__(self): super(RedhatOSUtil, self).__init__() @@ -113,3 +116,7 @@ class RedhatOSUtil(Redhat6xOSUtil): def openssl_to_openssh(self, input_file, output_file): DefaultOSUtil.openssl_to_openssh(self, input_file, output_file) + + def get_dhcp_lease_endpoint(self): + # centos7 has this weird naming with double hyphen like /var/lib/dhclient/dhclient--eth0.lease + return self.get_endpoint_from_leases_path('/var/lib/dhclient/dhclient-*.lease') diff --git a/azurelinuxagent/distro/suse/osutil.py b/azurelinuxagent/common/osutil/suse.py index 8d6f5bf..f0ed0c0 100644 --- a/azurelinuxagent/distro/suse/osutil.py +++ b/azurelinuxagent/common/osutil/suse.py @@ -25,12 +25,12 @@ import array import struct import fcntl import time -import azurelinuxagent.logger as logger -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.textutil as textutil -from azurelinuxagent.metadata import DISTRO_NAME, DISTRO_VERSION, DISTRO_FULL_NAME -from azurelinuxagent.distro.default.osutil import DefaultOSUtil +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import 
azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, DISTRO_FULL_NAME +from azurelinuxagent.common.osutil.default import DefaultOSUtil class SUSE11OSUtil(DefaultOSUtil): def __init__(self): diff --git a/azurelinuxagent/distro/ubuntu/osutil.py b/azurelinuxagent/common/osutil/ubuntu.py index cc4b8ef..4032cf4 100644 --- a/azurelinuxagent/distro/ubuntu/osutil.py +++ b/azurelinuxagent/common/osutil/ubuntu.py @@ -16,20 +16,8 @@ # Requires Python 2.4+ and Openssl 1.0+ # -import os -import re -import pwd -import shutil -import socket -import array -import struct -import fcntl -import time -import azurelinuxagent.logger as logger -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.textutil as textutil -from azurelinuxagent.distro.default.osutil import DefaultOSUtil +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.osutil.default import DefaultOSUtil class Ubuntu14OSUtil(DefaultOSUtil): def __init__(self): @@ -44,6 +32,15 @@ class Ubuntu14OSUtil(DefaultOSUtil): def start_agent_service(self): return shellutil.run("service walinuxagent start", chk_err=False) + def remove_rules_files(self, rules_files=""): + pass + + def restore_rules_files(self, rules_files=""): + pass + + def get_dhcp_lease_endpoint(self): + return self.get_endpoint_from_leases_path('/var/lib/dhcp/dhclient.*.leases') + class Ubuntu12OSUtil(Ubuntu14OSUtil): def __init__(self): super(Ubuntu12OSUtil, self).__init__() @@ -67,9 +64,3 @@ class UbuntuSnappyOSUtil(Ubuntu14OSUtil): def __init__(self): super(UbuntuSnappyOSUtil, self).__init__() self.conf_file_path = '/apps/walinuxagent/current/waagent.conf' - - def remove_rules_files(self, rules_files=""): - pass - - def restore_rules_files(self, rules_files=""): - pass diff --git a/azurelinuxagent/distro/debian/loader.py b/azurelinuxagent/common/protocol/__init__.py index cc0c06f..fb7c273 100644 --- a/azurelinuxagent/distro/debian/loader.py +++ b/azurelinuxagent/common/protocol/__init__.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,8 +15,7 @@ # Requires Python 2.4+ and Openssl 1.0+ # - -def get_osutil(): - from azurelinuxagent.distro.debian.osutil import DebianOSUtil - return DebianOSUtil() +from azurelinuxagent.common.protocol.util import get_protocol_util, \ + OVF_FILE_NAME, \ + TAG_FILE_NAME diff --git a/azurelinuxagent/common/protocol/hostplugin.py b/azurelinuxagent/common/protocol/hostplugin.py new file mode 100644 index 0000000..6569604 --- /dev/null +++ b/azurelinuxagent/common/protocol/hostplugin.py @@ -0,0 +1,124 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.common.protocol.wire import * +from azurelinuxagent.common.utils import textutil + +HOST_PLUGIN_PORT = 32526 +URI_FORMAT_GET_API_VERSIONS = "http://{0}:{1}/versions" +URI_FORMAT_PUT_VM_STATUS = "http://{0}:{1}/status" +URI_FORMAT_PUT_LOG = "http://{0}:{1}/vmAgentLog" +API_VERSION = "2015-09-01" + + +class HostPluginProtocol(object): + def __init__(self, endpoint): + if endpoint is None: + raise ProtocolError("Host plugin endpoint not provided") + self.is_initialized = False + self.is_available = False + self.api_versions = None + self.endpoint = endpoint + + def ensure_initialized(self): + if not self.is_initialized: + self.api_versions = self.get_api_versions() + self.is_available = API_VERSION in self.api_versions + self.is_initialized = True + return self.is_available + + def get_api_versions(self): + url = URI_FORMAT_GET_API_VERSIONS.format(self.endpoint, + HOST_PLUGIN_PORT) + logger.info("getting API versions at [{0}]".format(url)) + try: + response = restutil.http_get(url) + if response.status != httpclient.OK: + logger.error( + "get API versions returned status code [{0}]".format( + response.status)) + return [] + return response.read() + except HttpError as e: + logger.error("get API versions failed with [{0}]".format(e)) + return [] + + def put_vm_status(self, status_blob, sas_url): + """ + Try to upload the VM status via the host plugin /status channel + :param sas_url: the blob SAS url to pass to the host plugin + :type status_blob: StatusBlob + """ + if not self.ensure_initialized(): + logger.error("host plugin channel is not available") + return + if status_blob is None or status_blob.vm_status is None: + logger.error("no status data was provided") + return + url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT) + status = textutil.b64encode(status_blob.vm_status) + headers = {"x-ms-version": API_VERSION} + blob_headers = [{'headerName': 'x-ms-version', + 'headerValue': status_blob.__storage_version__}, + {'headerName': 'x-ms-blob-type', + 'headerValue': status_blob.type}] + data = json.dumps({'requestUri': sas_url, 'headers': blob_headers, + 'content': status}, sort_keys=True) + logger.info("put VM status at [{0}]".format(url)) + try: + response = restutil.http_put(url, data, headers) + if response.status != httpclient.OK: + logger.error("put VM status returned status code [{0}]".format( + response.status)) + except HttpError as e: + logger.error("put VM status failed with [{0}]".format(e)) + + def put_vm_log(self, content, container_id, deployment_id): + """ + Try to upload the given content to the host plugin + :param deployment_id: the deployment id, which is obtained from the + goal state (tenant name) + :param container_id: the container id, which is obtained from the + goal state + :param content: the binary content of the zip file to upload + :return: + """ + if not self.ensure_initialized(): + logger.error("host plugin channel is not available") + return + if content is None or container_id is None or deployment_id is None: + logger.error( + "invalid arguments passed: " + "[{0}], [{1}], [{2}]".format( + content, + container_id, + deployment_id)) + return + url = URI_FORMAT_PUT_LOG.format(self.endpoint, HOST_PLUGIN_PORT) + + headers = {"x-ms-vmagentlog-deploymentid": deployment_id, + "x-ms-vmagentlog-containerid": container_id} + logger.info("put VM log at [{0}]".format(url)) + try: + response = restutil.http_put(url, content, headers) + if response.status != httpclient.OK: + 
logger.error("put log returned status code [{0}]".format( + response.status)) + except HttpError as e: + logger.error("put log failed with [{0}]".format(e)) diff --git a/azurelinuxagent/protocol/metadata.py b/azurelinuxagent/common/protocol/metadata.py index 8a1656f..f86f72f 100644 --- a/azurelinuxagent/protocol/metadata.py +++ b/azurelinuxagent/common/protocol/metadata.py @@ -20,15 +20,15 @@ import json import shutil import os import time -from azurelinuxagent.exception import ProtocolError, HttpError -from azurelinuxagent.future import httpclient, ustr -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -import azurelinuxagent.utils.restutil as restutil -import azurelinuxagent.utils.textutil as textutil -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.utils.cryptutil import CryptUtil -from azurelinuxagent.protocol.restapi import * +from azurelinuxagent.common.exception import ProtocolError, HttpError +from azurelinuxagent.common.future import httpclient, ustr +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.restutil as restutil +import azurelinuxagent.common.utils.textutil as textutil +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.utils.cryptutil import CryptUtil +from azurelinuxagent.common.protocol.restapi import * METADATA_ENDPOINT='169.254.169.254' APIVERSION='2015-05-01-preview' @@ -58,6 +58,8 @@ class MetadataProtocol(Protocol): self.apiversion, "&$expand=*") self.ext_uri = BASE_URI.format(self.endpoint, "extensionHandlers", self.apiversion, "&$expand=*") + self.vmagent_uri = BASE_URI.format(self.endpoint, "vmAgentVersions", + self.apiversion, "&$expand=*") self.provision_status_uri = BASE_URI.format(self.endpoint, "provisioningStatus", self.apiversion, "") @@ -140,13 +142,39 @@ class MetadataProtocol(Protocol): #TODO download and save certs return CertList() - def get_ext_handlers(self): + def get_vmagent_manifests(self, last_etag=None): + manifests = VMAgentManifestList() + data, etag = self._get_data(self.vmagent_uri) + if last_etag == None or last_etag < etag: + set_properties("vmAgentManifests", manifests.vmAgentManifests, data) + return manifests, etag + + def get_vmagent_pkgs(self, vmagent_manifest): + #Agent package is the same with extension handler + vmagent_pkgs = ExtHandlerPackageList() + data = None + for manifest_uri in vmagent_manifest.versionsManifestUris: + try: + data = self._get_data(manifest_uri.uri) + break + except ProtocolError as e: + logger.warn("Failed to get vmagent versions: {0}", e) + logger.info("Retry getting vmagent versions") + if data is None: + raise ProtocolError(("Failed to get versions for vm agent: {0}" + "").format(vmagent_manifest.family)) + set_properties("vmAgentVersions", vmagent_pkgs, data) + # TODO: What etag should this return? 
+ return vmagent_pkgs + + def get_ext_handlers(self, last_etag=None): headers = { "x-ms-vmagent-public-x509-cert": self._get_trans_cert() } ext_list = ExtHandlerList() data, etag = self._get_data(self.ext_uri, headers=headers) - set_properties("extensionHandlers", ext_list.extHandlers, data) + if last_etag == None or last_etag < etag: + set_properties("extensionHandlers", ext_list.extHandlers, data) return ext_list, etag def get_ext_handler_pkgs(self, ext_handler): @@ -163,12 +191,12 @@ class MetadataProtocol(Protocol): return ext_handler_pkgs def report_provision_status(self, provision_status): - validata_param('provisionStatus', provision_status, ProvisionStatus) + validate_param('provisionStatus', provision_status, ProvisionStatus) data = get_properties(provision_status) self._put_data(self.provision_status_uri, data) def report_vm_status(self, vm_status): - validata_param('vmStatus', vm_status, VMStatus) + validate_param('vmStatus', vm_status, VMStatus) data = get_properties(vm_status) #TODO code field is not implemented for metadata protocol yet. Remove it handler_statuses = data['vmAgent']['extensionHandlers'] @@ -181,14 +209,14 @@ class MetadataProtocol(Protocol): self._put_data(self.vm_status_uri, data) def report_ext_status(self, ext_handler_name, ext_name, ext_status): - validata_param('extensionStatus', ext_status, ExtensionStatus) + validate_param('extensionStatus', ext_status, ExtensionStatus) data = get_properties(ext_status) uri = self.ext_status_uri.format(ext_name) self._put_data(uri, data) def report_event(self, events): #TODO disable telemetry for azure stack test - #validata_param('events', events, TelemetryEventList) + #validate_param('events', events, TelemetryEventList) #data = get_properties(events) #self._post_data(self.event_uri, data) pass diff --git a/azurelinuxagent/protocol/ovfenv.py b/azurelinuxagent/common/protocol/ovfenv.py index de6791c..4901871 100644 --- a/azurelinuxagent/protocol/ovfenv.py +++ b/azurelinuxagent/common/protocol/ovfenv.py @@ -23,11 +23,11 @@ import os import re import shutil import xml.dom.minidom as minidom -import azurelinuxagent.logger as logger -from azurelinuxagent.exception import ProtocolError -from azurelinuxagent.future import ustr -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.utils.textutil import parse_doc, findall, find, findtext +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import ProtocolError +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, findtext OVF_VERSION = "1.0" OVF_NAME_SPACE = "http://schemas.dmtf.org/ovf/environment/1" @@ -44,7 +44,7 @@ class OvfEnv(object): def __init__(self, xml_text): if xml_text is None: raise ValueError("ovf-env is None") - logger.verb("Load ovf-env.xml") + logger.verbose("Load ovf-env.xml") self.hostname = None self.username = None self.user_password = None diff --git a/azurelinuxagent/protocol/restapi.py b/azurelinuxagent/common/protocol/restapi.py index fbd29ed..7f00488 100644 --- a/azurelinuxagent/protocol/restapi.py +++ b/azurelinuxagent/common/protocol/restapi.py @@ -21,12 +21,12 @@ import copy import re import json import xml.dom.minidom -import azurelinuxagent.logger as logger -from azurelinuxagent.exception import ProtocolError, HttpError -from azurelinuxagent.future import ustr -import azurelinuxagent.utils.restutil as restutil +import azurelinuxagent.common.logger as logger +from 
azurelinuxagent.common.exception import ProtocolError, HttpError +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.utils.restutil as restutil -def validata_param(name, val, expected_type): +def validate_param(name, val, expected_type): if val is None: raise ProtocolError("{0} is None".format(name)) if not isinstance(val, expected_type): @@ -35,7 +35,7 @@ def validata_param(name, val, expected_type): def set_properties(name, obj, data): if isinstance(obj, DataContract): - validata_param("Property '{0}'".format(name), data, dict) + validate_param("Property '{0}'".format(name), data, dict) for prob_name, prob_val in data.items(): prob_full_name = "{0}.{1}".format(name, prob_name) try: @@ -47,7 +47,7 @@ def set_properties(name, obj, data): setattr(obj, prob_name, prob) return obj elif isinstance(obj, DataContractList): - validata_param("List '{0}'".format(name), data, list) + validate_param("List '{0}'".format(name), data, list) for item_data in data: item = obj.item_cls() item = set_properties(name, item, item_data) @@ -102,6 +102,20 @@ class CertList(DataContract): def __init__(self): self.certificates = DataContractList(Cert) +#TODO: confirm vmagent manifest schema +class VMAgentManifestUri(DataContract): + def __init__(self, uri=None): + self.uri = uri + +class VMAgentManifest(DataContract): + def __init__(self, family=None): + self.family = family + self.versionsManifestUris = DataContractList(VMAgentManifestUri) + +class VMAgentManifestList(DataContract): + def __init__(self): + self.vmAgentManifests = DataContractList(VMAgentManifest) + class Extension(DataContract): def __init__(self, name=None, sequenceNumber=None, publicSettings=None, protectedSettings=None, certificateThumbprint=None): @@ -140,6 +154,8 @@ class ExtHandlerPackage(DataContract): def __init__(self, version = None): self.version = version self.uris = DataContractList(ExtHandlerPackageUri) + # TODO update the naming to align with metadata protocol + self.isinternal = False class ExtHandlerPackageList(DataContract): def __init__(self): @@ -222,6 +238,12 @@ class Protocol(DataContract): def get_certs(self): raise NotImplementedError() + def get_vmagent_manifests(self): + raise NotImplementedError() + + def get_vmagent_pkgs(self): + raise NotImplementedError() + def get_ext_handlers(self): raise NotImplementedError() diff --git a/azurelinuxagent/distro/default/protocolUtil.py b/azurelinuxagent/common/protocol/util.py index 34466cf..7e7a74f 100644 --- a/azurelinuxagent/distro/default/protocolUtil.py +++ b/azurelinuxagent/common/protocol/util.py @@ -21,16 +21,19 @@ import re import shutil import time import threading -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -from azurelinuxagent.exception import ProtocolError, OSUtilError, \ +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import ProtocolError, OSUtilError, \ ProtocolNotFoundError, DhcpError -from azurelinuxagent.future import ustr -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.protocol.ovfenv import OvfEnv -from azurelinuxagent.protocol.wire import WireProtocol -from azurelinuxagent.protocol.metadata import MetadataProtocol, METADATA_ENDPOINT -import azurelinuxagent.utils.shellutil as shellutil +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.dhcp import 
get_dhcp_handler +from azurelinuxagent.common.protocol.ovfenv import OvfEnv +from azurelinuxagent.common.protocol.wire import WireProtocol +from azurelinuxagent.common.protocol.metadata import MetadataProtocol, \ + METADATA_ENDPOINT +import azurelinuxagent.common.utils.shellutil as shellutil OVF_FILE_NAME = "ovf-env.xml" @@ -46,15 +49,19 @@ PROBE_INTERVAL = 10 ENDPOINT_FILE_NAME = "WireServerEndpoint" +def get_protocol_util(): + return ProtocolUtil() + class ProtocolUtil(object): """ ProtocolUtil handles initialization for protocol instance. 2 protocol types are invoked, wire protocol and metadata protocols. """ - def __init__(self, distro): - self.distro = distro - self.protocol = None + def __init__(self): self.lock = threading.Lock() + self.protocol = None + self.osutil = get_osutil() + self.dhcp_handler = get_dhcp_handler() def copy_ovf_env(self): """ @@ -65,7 +72,7 @@ class ProtocolUtil(object): ovf_file_path_on_dvd = os.path.join(dvd_mount_point, OVF_FILE_NAME) tag_file_path_on_dvd = os.path.join(dvd_mount_point, TAG_FILE_NAME) try: - self.distro.osutil.mount_dvd() + self.osutil.mount_dvd() ovfxml = fileutil.read_file(ovf_file_path_on_dvd, remove_bom=True) ovfenv = OvfEnv(ovfxml) ovfxml = re.sub("<UserPassword>.*?<", "<UserPassword>*<", ovfxml) @@ -81,8 +88,8 @@ class ProtocolUtil(object): raise ProtocolError(ustr(e)) try: - self.distro.osutil.umount_dvd() - self.distro.osutil.eject_dvd() + self.osutil.umount_dvd() + self.osutil.eject_dvd() except OSUtilError as e: logger.warn(ustr(e)) @@ -114,23 +121,25 @@ class ProtocolUtil(object): raise OSUtilError(ustr(e)) def _detect_wire_protocol(self): - endpoint = self.distro.dhcp_handler.endpoint + endpoint = self.dhcp_handler.endpoint if endpoint is None: logger.info("WireServer endpoint is not found. Rerun dhcp handler") try: - self.distro.dhcp_handler.run() + self.dhcp_handler.run() except DhcpError as e: raise ProtocolError(ustr(e)) - endpoint = self.distro.dhcp_handler.endpoint + endpoint = self.dhcp_handler.endpoint try: protocol = WireProtocol(endpoint) protocol.detect() self._set_wireserver_endpoint(endpoint) + self.save_protocol("WireProtocol") return protocol except ProtocolError as e: logger.info("WireServer is not responding. Reset endpoint") - self.distro.dhcp_handler.endpoint = None + self.dhcp_handler.endpoint = None + self.dhcp_handler.skip_cache = True raise e def _detect_metadata_protocol(self): @@ -138,7 +147,9 @@ class ProtocolUtil(object): protocol.detect() #Only allow root access METADATA_ENDPOINT - self.distro.osutil.set_admin_access_to_ip(METADATA_ENDPOINT) + self.osutil.set_admin_access_to_ip(METADATA_ENDPOINT) + + self.save_protocol("MetadataProtocol") return protocol @@ -146,9 +157,8 @@ class ProtocolUtil(object): """ Probe protocol endpoints in turn. 
""" - protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) - if os.path.isfile(protocol_file_path): - os.remove(protocol_file_path) + self.clear_protocol() + for retry in range(0, MAX_RETRY): for protocol in protocols: try: @@ -174,7 +184,7 @@ class ProtocolUtil(object): protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) if not os.path.isfile(protocol_file_path): - raise ProtocolError("No protocl found") + raise ProtocolNotFoundError("No protocol found") protocol_name = fileutil.read_file(protocol_file_path) if protocol_name == "WireProtocol": @@ -186,23 +196,61 @@ class ProtocolUtil(object): raise ProtocolNotFoundError(("Unknown protocol: {0}" "").format(protocol_name)) - def detect_protocol(self): + def save_protocol(self, protocol_name): + """ + Save protocol endpoint + """ + protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) + try: + fileutil.write_file(protocol_file_path, protocol_name) + except IOError as e: + logger.error("Failed to save protocol endpoint: {0}", e) + + + def clear_protocol(self): + """ + Cleanup previous saved endpoint. + """ + logger.info("Clean protocol") + self.protocol = None + protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) + if not os.path.isfile(protocol_file_path): + return + + try: + os.remove(protocol_file_path) + except IOError as e: + logger.error("Failed to clear protocol endpoint: {0}", e) + + def get_protocol(self): """ Detect protocol by endpoints :returns: protocol instance """ - logger.info("Detect protocol endpoints") - protocols = ["WireProtocol", "MetadataProtocol"] self.lock.acquire() + try: - if self.protocol is None: - self.protocol = self._detect_protocol(protocols) + if self.protocol is not None: + return self.protocol + + try: + self.protocol = self._get_protocol() + return self.protocol + except ProtocolNotFoundError: + pass + + logger.info("Detect protocol endpoints") + protocols = ["WireProtocol", "MetadataProtocol"] + self.protocol = self._detect_protocol(protocols) + return self.protocol + finally: self.lock.release() - def detect_protocol_by_file(self): + + def get_protocol_by_file(self): """ Detect protocol by tag file. @@ -211,33 +259,27 @@ class ProtocolUtil(object): :returns: protocol instance """ - logger.info("Detect protocol by file") self.lock.acquire() + try: - tag_file_path = os.path.join(conf.get_lib_dir(), TAG_FILE_NAME) - if self.protocol is None: - protocols = [] - if os.path.isfile(tag_file_path): - protocols.append("MetadataProtocol") - else: - protocols.append("WireProtocol") - self.protocol = self._detect_protocol(protocols) - finally: - self.lock.release() - return self.protocol + if self.protocol is not None: + return self.protocol - def get_protocol(self): - """ - Get protocol instance based on previous detecting result. 
+ try: + self.protocol = self._get_protocol() + return self.protocol + except ProtocolNotFoundError: + pass - :returns protocol instance - """ - self.lock.acquire() - try: - if self.protocol is None: - self.protocol = self._get_protocol() + logger.info("Detect protocol by file") + tag_file_path = os.path.join(conf.get_lib_dir(), TAG_FILE_NAME) + protocols = [] + if os.path.isfile(tag_file_path): + protocols.append("MetadataProtocol") + else: + protocols.append("WireProtocol") + self.protocol = self._detect_protocol(protocols) return self.protocol + finally: self.lock.release() - return self.protocol - diff --git a/azurelinuxagent/protocol/wire.py b/azurelinuxagent/common/protocol/wire.py index 7b5ffe8..29a1663 100644 --- a/azurelinuxagent/protocol/wire.py +++ b/azurelinuxagent/common/protocol/wire.py @@ -16,25 +16,17 @@ # # Requires Python 2.4+ and Openssl 1.0+ -import os -import json -import re import time -import traceback import xml.sax.saxutils as saxutils -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -from azurelinuxagent.exception import ProtocolError, HttpError, \ - ProtocolNotFoundError -from azurelinuxagent.future import ustr, httpclient, bytebuffer -import azurelinuxagent.utils.restutil as restutil -from azurelinuxagent.utils.textutil import parse_doc, findall, find, findtext, \ - getattrib, gettext, remove_bom, \ - get_bytes_from_pem -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -from azurelinuxagent.utils.cryptutil import CryptUtil -from azurelinuxagent.protocol.restapi import * +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.exception import ProtocolNotFoundError +from azurelinuxagent.common.future import httpclient, bytebuffer +from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, findtext, \ + getattrib, gettext, remove_bom, get_bytes_from_pem +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.utils.cryptutil import CryptUtil +from azurelinuxagent.common.protocol.restapi import * +from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol VERSION_INFO_URI = "http://{0}/?comp=versions" GOAL_STATE_URI = "http://{0}/machine/?comp=goalstate" @@ -58,25 +50,37 @@ TRANSPORT_PRV_FILE_NAME = "TransportPrivate.pem" PROTOCOL_VERSION = "2012-11-30" ENDPOINT_FINE_NAME = "WireServer" -SHORT_WAITING_INTERVAL = 1 # 1 second -LONG_WAITING_INTERVAL = 15 # 15 seconds +SHORT_WAITING_INTERVAL = 1 # 1 second +LONG_WAITING_INTERVAL = 15 # 15 seconds + + +class UploadError(HttpError): + pass + class WireProtocolResourceGone(ProtocolError): pass + class WireProtocol(Protocol): - """Slim layer to adapte wire protocol data to metadata protocol interface""" + """Slim layer to adapt wire protocol data to metadata protocol interface""" + + # TODO: Clean-up goal state processing + # At present, some methods magically update GoalState (e.g., get_vmagent_manifests), others (e.g., get_vmagent_pkgs) + # assume its presence. A better approach would make an explicit update call that returns the incarnation number and + # establishes that number the "context" for all other calls (either by updating the internal state of the protocol or + # by having callers pass the incarnation number to the method). 
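    # A minimal sketch of that explicit-update shape, kept in a comment because it is
    # illustrative only; the update_goal_state() return value shown here is hypothetical
    # and not something the current client provides:
    #
    #     incarnation = protocol.client.update_goal_state()
    #     manifests, _ = protocol.get_vmagent_manifests()
    #     pkgs = protocol.get_vmagent_pkgs(manifests.vmAgentManifests[0])
    #
    # Subsequent calls would then operate against that single incarnation instead of
    # refreshing the goal state as a side effect.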
def __init__(self, endpoint): if endpoint is None: - raise ProtocolError("WireProtocl endpoint is None") + raise ProtocolError("WireProtocol endpoint is None") self.endpoint = endpoint self.client = WireClient(self.endpoint) def detect(self): self.client.check_wire_protocol_version() - trans_prv_file = os.path.join(conf.get_lib_dir(), + trans_prv_file = os.path.join(conf.get_lib_dir(), TRANSPORT_PRV_FILE_NAME) trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) @@ -102,23 +106,35 @@ class WireProtocol(Protocol): certificates = self.client.get_certs() return certificates.cert_list + def get_vmagent_manifests(self): + # Update goal state to get latest extensions config + self.client.update_goal_state() + goal_state = self.client.get_goal_state() + ext_conf = self.client.get_ext_conf() + return ext_conf.vmagent_manifests, goal_state.incarnation + + def get_vmagent_pkgs(self, vmagent_manifest): + goal_state = self.client.get_goal_state() + man = self.client.get_gafamily_manifest(vmagent_manifest, goal_state) + return man.pkg_list + def get_ext_handlers(self): - logger.verb("Get extension handler config") - #Update goal state to get latest extensions config + logger.verbose("Get extension handler config") + # Update goal state to get latest extensions config self.client.update_goal_state() goal_state = self.client.get_goal_state() ext_conf = self.client.get_ext_conf() - #In wire protocol, incarnation is equivalent to ETag + # In wire protocol, incarnation is equivalent to ETag return ext_conf.ext_handlers, goal_state.incarnation def get_ext_handler_pkgs(self, ext_handler): - logger.verb("Get extension handler package") + logger.verbose("Get extension handler package") goal_state = self.client.get_goal_state() man = self.client.get_ext_manifest(ext_handler, goal_state) return man.pkg_list - + def report_provision_status(self, provision_status): - validata_param("provision_status", provision_status, ProvisionStatus) + validate_param("provision_status", provision_status, ProvisionStatus) if provision_status.status is not None: self.client.report_health(provision_status.status, @@ -129,18 +145,19 @@ class WireProtocol(Protocol): self.client.report_role_prop(thumbprint) def report_vm_status(self, vm_status): - validata_param("vm_status", vm_status, VMStatus) + validate_param("vm_status", vm_status, VMStatus) self.client.status_blob.set_vm_status(vm_status) self.client.upload_status_blob() def report_ext_status(self, ext_handler_name, ext_name, ext_status): - validata_param("ext_status", ext_status, ExtensionStatus) + validate_param("ext_status", ext_status, ExtensionStatus) self.client.status_blob.set_ext_status(ext_handler_name, ext_status) def report_event(self, events): - validata_param("events", events, TelemetryEventList) + validate_param("events", events, TelemetryEventList) self.client.report_event(events) + def _build_role_properties(container_id, role_instance_id, thumbprint): xml = (u"<?xml version=\"1.0\" encoding=\"utf-8\"?>" u"<RoleProperties>" @@ -159,9 +176,10 @@ def _build_role_properties(container_id, role_instance_id, thumbprint): u"").format(container_id, role_instance_id, thumbprint) return xml + def _build_health_report(incarnation, container_id, role_instance_id, status, substatus, description): - #Escape '&', '<' and '>' + # Escape '&', '<' and '>' description = saxutils.escape(ustr(description)) detail = u'' if substatus is not None: @@ -195,21 +213,25 @@ def _build_health_report(incarnation, container_id, role_instance_id, detail) return xml + """ 
Convert VMStatus object to status blob format """ + + def ga_status_to_v1(ga_status): formatted_msg = { - 'lang' : 'en-US', - 'message' : ga_status.message + 'lang': 'en-US', + 'message': ga_status.message } v1_ga_status = { - 'version' : ga_status.version, - 'status' : ga_status.status, - 'formattedMessage' : formatted_msg + 'version': ga_status.version, + 'status': ga_status.status, + 'formattedMessage': formatted_msg } return v1_ga_status + def ext_substatus_to_v1(sub_status_list): status_list = [] for substatus in sub_status_list: @@ -217,7 +239,7 @@ def ext_substatus_to_v1(sub_status_list): "name": substatus.name, "status": substatus.status, "code": substatus.code, - "formattedMessage":{ + "formattedMessage": { "lang": "en-US", "message": substatus.message } @@ -225,20 +247,21 @@ def ext_substatus_to_v1(sub_status_list): status_list.append(status) return status_list + def ext_status_to_v1(ext_name, ext_status): if ext_status is None: return None timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) v1_sub_status = ext_substatus_to_v1(ext_status.substatusList) v1_ext_status = { - "status":{ + "status": { "name": ext_name, "configurationAppliedTime": ext_status.configurationAppliedTime, "operation": ext_status.operation, "status": ext_status.status, "code": ext_status.code, "formattedMessage": { - "lang":"en-US", + "lang": "en-US", "message": ext_status.message } }, @@ -248,51 +271,53 @@ def ext_status_to_v1(ext_name, ext_status): if len(v1_sub_status) != 0: v1_ext_status['substatus'] = v1_sub_status return v1_ext_status - + + def ext_handler_status_to_v1(handler_status, ext_statuses, timestamp): v1_handler_status = { - 'handlerVersion' : handler_status.version, - 'handlerName' : handler_status.name, - 'status' : handler_status.status, + 'handlerVersion': handler_status.version, + 'handlerName': handler_status.name, + 'status': handler_status.status, 'code': handler_status.code } if handler_status.message is not None: v1_handler_status["formattedMessage"] = { - "lang":"en-US", + "lang": "en-US", "message": handler_status.message } if len(handler_status.extensions) > 0: - #Currently, no more than one extension per handler + # Currently, no more than one extension per handler ext_name = handler_status.extensions[0] ext_status = ext_statuses.get(ext_name) v1_ext_status = ext_status_to_v1(ext_name, ext_status) if ext_status is not None and v1_ext_status is not None: v1_handler_status["runtimeSettingsStatus"] = { - 'settingsStatus' : v1_ext_status, - 'sequenceNumber' : ext_status.sequenceNumber + 'settingsStatus': v1_ext_status, + 'sequenceNumber': ext_status.sequenceNumber } return v1_handler_status + def vm_status_to_v1(vm_status, ext_statuses): timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) v1_ga_status = ga_status_to_v1(vm_status.vmAgent) v1_handler_status_list = [] for handler_status in vm_status.vmAgent.extensionHandlers: - v1_handler_status = ext_handler_status_to_v1(handler_status, + v1_handler_status = ext_handler_status_to_v1(handler_status, ext_statuses, timestamp) if v1_handler_status is not None: v1_handler_status_list.append(v1_handler_status) v1_agg_status = { 'guestAgentStatus': v1_ga_status, - 'handlerAggregateStatus' : v1_handler_status_list + 'handlerAggregateStatus': v1_handler_status_list } v1_vm_status = { - 'version' : '1.0', - 'timestampUTC' : timestamp, - 'aggregateStatus' : v1_agg_status + 'version': '1.0', + 'timestampUTC': timestamp, + 'aggregateStatus': v1_agg_status } return v1_vm_status @@ -302,15 +327,17 @@ class StatusBlob(object): 
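        # Shape of the v1 status document assembled by the helpers above:
        #   { "version": "1.0", "timestampUTC": "...",
        #     "aggregateStatus": { "guestAgentStatus": {...},
        #                          "handlerAggregateStatus": [ {...}, ... ] } }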
self.vm_status = None self.ext_statuses = {} self.client = client + self.type = None + self.data = None def set_vm_status(self, vm_status): - validata_param("vmAgent", vm_status, VMStatus) + validate_param("vmAgent", vm_status, VMStatus) self.vm_status = vm_status - + def set_ext_status(self, ext_handler_name, ext_status): - validata_param("extensionStatus", ext_status, ExtensionStatus) - self.ext_statuses[ext_handler_name]= ext_status - + validate_param("extensionStatus", ext_status, ExtensionStatus) + self.ext_statuses[ext_handler_name] = ext_status + def to_json(self): report = vm_status_to_v1(self.vm_status, self.ext_statuses) return json.dumps(report) @@ -318,29 +345,33 @@ class StatusBlob(object): __storage_version__ = "2014-02-14" def upload(self, url): - #TODO upload extension only if content has changed - logger.verb("Upload status blob") - blob_type = self.get_blob_type(url) - - data = self.to_json() + # TODO upload extension only if content has changed + logger.verbose("Upload status blob") + upload_successful = False + self.type = self.get_blob_type(url) + self.data = self.to_json() try: - if blob_type == "BlockBlob": - self.put_block_blob(url, data) - elif blob_type == "PageBlob": - self.put_page_blob(url, data) + if self.type == "BlockBlob": + self.put_block_blob(url, self.data) + elif self.type == "PageBlob": + self.put_page_blob(url, self.data) else: - raise ProtocolError("Unknown blob type: {0}".format(blob_type)) + raise ProtocolError("Unknown blob type: {0}".format(self.type)) except HttpError as e: - raise ProtocolError("Failed to upload status blob: {0}".format(e)) + logger.warn("Initial upload failed [{0}]".format(e)) + else: + logger.verbose("Uploading status blob succeeded") + upload_successful = True + return upload_successful def get_blob_type(self, url): - #Check blob type - logger.verb("Check blob type.") + # Check blob type + logger.verbose("Check blob type.") timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) try: resp = self.client.call_storage_service(restutil.http_head, url, { - "x-ms-date" : timestamp, - 'x-ms-version' : self.__class__.__storage_version__ + "x-ms-date": timestamp, + 'x-ms-version': self.__class__.__storage_version__ }) except HttpError as e: raise ProtocolError((u"Failed to get status blob type: {0}" @@ -350,86 +381,76 @@ class StatusBlob(object): "").format(resp.status)) blob_type = resp.getheader("x-ms-blob-type") - logger.verb("Blob type={0}".format(blob_type)) + logger.verbose("Blob type={0}".format(blob_type)) return blob_type def put_block_blob(self, url, data): - logger.verb("Upload block blob") + logger.verbose("Upload block blob") timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) - try: - resp = self.client.call_storage_service(restutil.http_put, url, - data, { - "x-ms-date" : timestamp, - "x-ms-blob-type" : "BlockBlob", - "Content-Length": ustr(len(data)), - "x-ms-version" : self.__class__.__storage_version__ - }) - except HttpError as e: - raise ProtocolError((u"Failed to upload block blob: {0}" - u"").format(e)) + resp = self.client.call_storage_service(restutil.http_put, url, data, + { + "x-ms-date": timestamp, + "x-ms-blob-type": "BlockBlob", + "Content-Length": ustr(len(data)), + "x-ms-version": self.__class__.__storage_version__ + }) if resp.status != httpclient.CREATED: - raise ProtocolError(("Failed to upload block blob: {0}" - "").format(resp.status)) + raise UploadError( + "Failed to upload block blob: {0}".format(resp.status)) def put_page_blob(self, url, data): - logger.verb("Replace old page 
blob") + logger.verbose("Replace old page blob") - #Convert string into bytes - data=bytearray(data, encoding='utf-8') + # Convert string into bytes + data = bytearray(data, encoding='utf-8') timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) - #Align to 512 bytes + # Align to 512 bytes page_blob_size = int((len(data) + 511) / 512) * 512 - try: - resp = self.client.call_storage_service(restutil.http_put, url, - "", { - "x-ms-date" : timestamp, - "x-ms-blob-type" : "PageBlob", - "Content-Length": "0", - "x-ms-blob-content-length" : ustr(page_blob_size), - "x-ms-version" : self.__class__.__storage_version__ - }) - except HttpError as e: - raise ProtocolError((u"Failed to clean up page blob: {0}" - u"").format(e)) + resp = self.client.call_storage_service(restutil.http_put, url, "", + { + "x-ms-date": timestamp, + "x-ms-blob-type": "PageBlob", + "Content-Length": "0", + "x-ms-blob-content-length": ustr(page_blob_size), + "x-ms-version": self.__class__.__storage_version__ + }) if resp.status != httpclient.CREATED: - raise ProtocolError(("Failed to clean up page blob: {0}" - "").format(resp.status)) + raise UploadError( + "Failed to clean up page blob: {0}".format(resp.status)) if url.count("?") < 0: url = "{0}?comp=page".format(url) else: url = "{0}&comp=page".format(url) - logger.verb("Upload page blob") - page_max = 4 * 1024 * 1024 #Max page size: 4MB + logger.verbose("Upload page blob") + page_max = 4 * 1024 * 1024 # Max page size: 4MB start = 0 end = 0 while end < len(data): end = min(len(data), start + page_max) content_size = end - start - #Align to 512 bytes + # Align to 512 bytes page_end = int((end + 511) / 512) * 512 buf_size = page_end - start buf = bytearray(buf_size) buf[0: content_size] = data[start: end] - try: - resp = self.client.call_storage_service(restutil.http_put, url, - bytebuffer(buf), { - "x-ms-date" : timestamp, - "x-ms-range" : "bytes={0}-{1}".format(start, page_end - 1), - "x-ms-page-write" : "update", - "x-ms-version" : self.__class__.__storage_version__, + resp = self.client.call_storage_service( + restutil.http_put, url, bytebuffer(buf), + { + "x-ms-date": timestamp, + "x-ms-range": "bytes={0}-{1}".format(start, page_end - 1), + "x-ms-page-write": "update", + "x-ms-version": self.__class__.__storage_version__, "Content-Length": ustr(page_end - start) }) - except HttpError as e: - raise ProtocolError((u"Failed to upload page blob: {0}" - u"").format(e)) if resp is None or resp.status != httpclient.CREATED: - raise ProtocolError(("Failed to upload page blob: {0}" - "").format(resp.status)) + raise UploadError( + "Failed to upload page blob: {0}".format(resp.status)) start = end + def event_param_to_v1(param): param_format = '<Param Name="{0}" Value={1} T="{2}" />' param_type = type(param.value) @@ -447,6 +468,7 @@ def event_param_to_v1(param): return param_format.format(param.name, saxutils.quoteattr(ustr(param.value)), attr_type) + def event_to_v1(event): params = "" for param in event.parameters: @@ -456,6 +478,7 @@ def event_to_v1(event): '</Event>').format(event.eventId, params) return event_str + class WireClient(object): def __init__(self, endpoint): logger.info("Wire server endpoint:{0}", endpoint) @@ -469,6 +492,7 @@ class WireClient(object): self.last_request = 0 self.req_count = 0 self.status_blob = StatusBlob(self) + self.host_plugin = HostPluginProtocol(self.endpoint) def prevent_throttling(self): """ @@ -476,15 +500,15 @@ class WireClient(object): """ now = time.time() if now - self.last_request < 1: - logger.verb("Last request issued less 
than 1 second ago") - logger.verb("Sleep {0} second to avoid throttling.", + logger.verbose("Last request issued less than 1 second ago") + logger.verbose("Sleep {0} second to avoid throttling.", SHORT_WAITING_INTERVAL) time.sleep(SHORT_WAITING_INTERVAL) self.last_request = now self.req_count += 1 if self.req_count % 3 == 0: - logger.verb("Sleep {0} second to avoid throttling.", + logger.verbose("Sleep {0} second to avoid throttling.", SHORT_WAITING_INTERVAL) time.sleep(SHORT_WAITING_INTERVAL) self.req_count = 0 @@ -498,7 +522,7 @@ class WireClient(object): resp = http_req(*args, **kwargs) if resp.status == httpclient.FORBIDDEN: logger.warn("Sending too much request to wire server") - logger.info("Sleep {0} second to avoid throttling.", + logger.info("Sleep {0} second to avoid throttling.", LONG_WAITING_INTERVAL) time.sleep(LONG_WAITING_INTERVAL) elif resp.status == httpclient.GONE: @@ -518,12 +542,12 @@ class WireClient(object): def fetch_config(self, uri, headers): try: - resp = self.call_wireserver(restutil.http_get, uri, + resp = self.call_wireserver(restutil.http_get, uri, headers=headers) except HttpError as e: raise ProtocolError(ustr(e)) - if(resp.status != httpclient.OK): + if (resp.status != httpclient.OK): raise ProtocolError("{0} - {1}".format(resp.status, uri)) return self.decode_config(resp.read()) @@ -550,7 +574,7 @@ class WireClient(object): resp = http_req(*args, **kwargs) if resp.status == httpclient.SERVICE_UNAVAILABLE: logger.warn("Storage service is not avaible temporaryly") - logger.info("Will retry later, in {0} seconds", + logger.info("Will retry later, in {0} seconds", LONG_WAITING_INTERVAL) time.sleep(LONG_WAITING_INTERVAL) else: @@ -560,30 +584,29 @@ class WireClient(object): def fetch_manifest(self, version_uris): for version_uri in version_uris: - logger.verb("Fetch ext handler manifest: {0}", version_uri.uri) + logger.verbose("Fetch ext handler manifest: {0}", version_uri.uri) try: - resp = self.call_storage_service(restutil.http_get, - version_uri.uri, None, + resp = self.call_storage_service(restutil.http_get, + version_uri.uri, None, chk_proxy=True) except HttpError as e: raise ProtocolError(ustr(e)) if resp.status == httpclient.OK: return self.decode_config(resp.read()) - logger.warn("Failed to fetch ExtensionManifest: {0}, {1}", + logger.warn("Failed to fetch ExtensionManifest: {0}, {1}", resp.status, version_uri.uri) - logger.info("Will retry later, in {0} seconds", + logger.info("Will retry later, in {0} seconds", LONG_WAITING_INTERVAL) time.sleep(LONG_WAITING_INTERVAL) raise ProtocolError(("Failed to fetch ExtensionManifest from " "all sources")) - def update_hosting_env(self, goal_state): if goal_state.hosting_env_uri is None: raise ProtocolError("HostingEnvironmentConfig uri is empty") local_file = os.path.join(conf.get_lib_dir(), HOSTING_ENV_FILE_NAME) - xml_text = self.fetch_config(goal_state.hosting_env_uri, + xml_text = self.fetch_config(goal_state.hosting_env_uri, self.get_header()) self.save_cache(local_file, xml_text) self.hosting_env = HostingEnv(xml_text) @@ -592,7 +615,7 @@ class WireClient(object): if goal_state.shared_conf_uri is None: raise ProtocolError("SharedConfig uri is empty") local_file = os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME) - xml_text = self.fetch_config(goal_state.shared_conf_uri, + xml_text = self.fetch_config(goal_state.shared_conf_uri, self.get_header()) self.save_cache(local_file, xml_text) self.shared_conf = SharedConfig(xml_text) @@ -601,7 +624,7 @@ class WireClient(object): if goal_state.certs_uri is 
None: return local_file = os.path.join(conf.get_lib_dir(), CERTS_FILE_NAME) - xml_text = self.fetch_config(goal_state.certs_uri, + xml_text = self.fetch_config(goal_state.certs_uri, self.get_header_for_cert()) self.save_cache(local_file, xml_text) self.certs = Certificates(self, xml_text) @@ -612,31 +635,31 @@ class WireClient(object): self.ext_conf = ExtensionsConfig(None) return incarnation = goal_state.incarnation - local_file = os.path.join(conf.get_lib_dir(), - EXT_CONF_FILE_NAME.format(incarnation)) + local_file = os.path.join(conf.get_lib_dir(), + EXT_CONF_FILE_NAME.format(incarnation)) xml_text = self.fetch_config(goal_state.ext_uri, self.get_header()) self.save_cache(local_file, xml_text) self.ext_conf = ExtensionsConfig(xml_text) - + def update_goal_state(self, forced=False, max_retry=3): uri = GOAL_STATE_URI.format(self.endpoint) xml_text = self.fetch_config(uri, self.get_header()) goal_state = GoalState(xml_text) - incarnation_file = os.path.join(conf.get_lib_dir(), + incarnation_file = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) if not forced: last_incarnation = None - if(os.path.isfile(incarnation_file)): + if (os.path.isfile(incarnation_file)): last_incarnation = fileutil.read_file(incarnation_file) new_incarnation = goal_state.incarnation if last_incarnation is not None and \ - last_incarnation == new_incarnation: - #Goalstate is not updated. + last_incarnation == new_incarnation: + # Goalstate is not updated. return - #Start updating goalstate, retry on 410 + # Start updating goalstate, retry on 410 for retry in range(0, max_retry): try: self.goal_state = goal_state @@ -657,8 +680,8 @@ class WireClient(object): raise ProtocolError("Exceeded max retry updating goal state") def get_goal_state(self): - if(self.goal_state is None): - incarnation_file = os.path.join(conf.get_lib_dir(), + if (self.goal_state is None): + incarnation_file = os.path.join(conf.get_lib_dir(), INCARNATION_FILE_NAME) incarnation = self.fetch_cache(incarnation_file) @@ -669,21 +692,21 @@ class WireClient(object): return self.goal_state def get_hosting_env(self): - if(self.hosting_env is None): + if (self.hosting_env is None): local_file = os.path.join(conf.get_lib_dir(), HOSTING_ENV_FILE_NAME) xml_text = self.fetch_cache(local_file) self.hosting_env = HostingEnv(xml_text) return self.hosting_env def get_shared_conf(self): - if(self.shared_conf is None): + if (self.shared_conf is None): local_file = os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME) xml_text = self.fetch_cache(local_file) self.shared_conf = SharedConfig(xml_text) return self.shared_conf def get_certs(self): - if(self.certs is None): + if (self.certs is None): local_file = os.path.join(conf.get_lib_dir(), CERTS_FILE_NAME) xml_text = self.fetch_cache(local_file) self.certs = Certificates(self, xml_text) @@ -692,7 +715,7 @@ class WireClient(object): return self.certs def get_ext_conf(self): - if(self.ext_conf is None): + if (self.ext_conf is None): goal_state = self.get_goal_state() if goal_state.ext_uri is None: self.ext_conf = ExtensionsConfig(None) @@ -711,6 +734,14 @@ class WireClient(object): self.save_cache(local_file, xml_text) return ExtensionManifest(xml_text) + def get_gafamily_manifest(self, vmagent_manifest, goal_state): + local_file = MANIFEST_FILE_NAME.format(vmagent_manifest.family, + goal_state.incarnation) + local_file = os.path.join(conf.get_lib_dir(), local_file) + xml_text = self.fetch_manifest(vmagent_manifest.versionsManifestUris) + fileutil.write_file(local_file, xml_text) + return 
ExtensionManifest(xml_text) + def check_wire_protocol_version(self): uri = VERSION_INFO_URI.format(self.endpoint) version_info_xml = self.fetch_config(uri, None) @@ -726,11 +757,13 @@ class WireClient(object): error = ("Agent supported wire protocol version: {0} was not " "advised by Fabric.").format(PROTOCOL_VERSION) raise ProtocolNotFoundError(error) - + def upload_status_blob(self): ext_conf = self.get_ext_conf() if ext_conf.status_upload_blob is not None: - self.status_blob.upload(ext_conf.status_upload_blob) + if not self.status_blob.upload(ext_conf.status_upload_blob): + self.host_plugin.put_vm_status(self.status_blob, + ext_conf.status_upload_blob) def report_role_prop(self, thumbprint): goal_state = self.get_goal_state() @@ -742,7 +775,7 @@ class WireClient(object): headers = self.get_header_for_xml_content() try: resp = self.call_wireserver(restutil.http_post, role_prop_uri, - role_prop, headers = headers) + role_prop, headers=headers) except HttpError as e: raise ProtocolError((u"Failed to send role properties: {0}" u"").format(e)) @@ -763,7 +796,7 @@ class WireClient(object): headers = self.get_header_for_xml_content() try: resp = self.call_wireserver(restutil.http_post, health_report_uri, - health_report, headers = headers) + health_report, headers=headers, max_retry=8) except HttpError as e: raise ProtocolError((u"Failed to send provision status: {0}" u"").format(e)) @@ -775,8 +808,8 @@ class WireClient(object): uri = TELEMETRY_URI.format(self.endpoint) data_format = ('<?xml version="1.0"?>' '<TelemetryData version="1.0">' - '<Provider id="{0}">{1}' - '</Provider>' + '<Provider id="{0}">{1}' + '</Provider>' '</TelemetryData>') data = data_format.format(provider_id, event_str) try: @@ -786,12 +819,12 @@ class WireClient(object): raise ProtocolError("Failed to send events:{0}".format(e)) if resp.status != httpclient.OK: - logger.verb(resp.read()) + logger.verbose(resp.read()) raise ProtocolError("Failed to send events:{0}".format(resp.status)) def report_event(self, event_list): buf = {} - #Group events by providerId + # Group events by providerId for event in event_list.events: if event.providerId not in buf: buf[event.providerId] = "" @@ -804,22 +837,22 @@ class WireClient(object): buf[event.providerId] = "" buf[event.providerId] = buf[event.providerId] + event_str - #Send out all events left in buffer. + # Send out all events left in buffer. for provider_id in list(buf.keys()): if len(buf[provider_id]) > 0: self.send_event(provider_id, buf[provider_id]) def get_header(self): return { - "x-ms-agent-name":"WALinuxAgent", - "x-ms-version":PROTOCOL_VERSION + "x-ms-agent-name": "WALinuxAgent", + "x-ms-version": PROTOCOL_VERSION } def get_header_for_xml_content(self): return { - "x-ms-agent-name":"WALinuxAgent", - "x-ms-version":PROTOCOL_VERSION, - "Content-Type":"text/xml;charset=utf-8" + "x-ms-agent-name": "WALinuxAgent", + "x-ms-version": PROTOCOL_VERSION, + "Content-Type": "text/xml;charset=utf-8" } def get_header_for_cert(self): @@ -828,10 +861,10 @@ class WireClient(object): content = self.fetch_cache(trans_cert_file) cert = get_bytes_from_pem(content) return { - "x-ms-agent-name":"WALinuxAgent", - "x-ms-version":PROTOCOL_VERSION, + "x-ms-agent-name": "WALinuxAgent", + "x-ms-version": PROTOCOL_VERSION, "x-ms-cipher-name": "DES_EDE3_CBC", - "x-ms-guest-agent-public-x509-cert":cert + "x-ms-guest-agent-public-x509-cert": cert } class VersionInfo(object): @@ -840,7 +873,7 @@ class VersionInfo(object): Query endpoint server for wire protocol version. 
Fail if our desired protocol version is not seen. """ - logger.verb("Load Version.xml") + logger.verbose("Load Version.xml") self.parse(xml_text) def parse(self, xml_text): @@ -854,7 +887,7 @@ class VersionInfo(object): supported_version = findall(supported, "Version") for node in supported_version: version = gettext(node) - logger.verb("Fabric supported wire protocol version:{0}", version) + logger.verbose("Fabric supported wire protocol version:{0}", version) self.supported.append(version) def get_preferred(self): @@ -865,11 +898,10 @@ class VersionInfo(object): class GoalState(object): - def __init__(self, xml_text): if xml_text is None: raise ValueError("GoalState.xml is None") - logger.verb("Load GoalState.xml") + logger.verbose("Load GoalState.xml") self.incarnation = None self.expected_state = None self.hosting_env_uri = None @@ -891,7 +923,7 @@ class GoalState(object): self.expected_state = findtext(xml_doc, "ExpectedState") self.hosting_env_uri = findtext(xml_doc, "HostingEnvironmentConfig") self.shared_conf_uri = findtext(xml_doc, "SharedConfig") - self.certs_uri = findtext(xml_doc, "Certificates") + self.certs_uri = findtext(xml_doc, "Certificates") self.ext_uri = findtext(xml_doc, "ExtensionsConfig") role_instance = find(xml_doc, "RoleInstance") self.role_instance_id = findtext(role_instance, "InstanceId") @@ -907,10 +939,11 @@ class HostingEnv(object): parse Hosting enviromnet config and store in HostingEnvironmentConfig.xml """ + def __init__(self, xml_text): if xml_text is None: raise ValueError("HostingEnvironmentConfig.xml is None") - logger.verb("Load HostingEnvironmentConfig.xml") + logger.verbose("Load HostingEnvironmentConfig.xml") self.vm_name = None self.role_name = None self.deployment_name = None @@ -930,28 +963,30 @@ class HostingEnv(object): self.deployment_name = getattrib(deployment, "name") return self + class SharedConfig(object): """ parse role endpoint server and goal state config. """ + def __init__(self, xml_text): - logger.verb("Load SharedConfig.xml") + logger.verbose("Load SharedConfig.xml") self.parse(xml_text) def parse(self, xml_text): """ parse and write configuration to file SharedConfig.xml. """ - #Not used currently + # Not used currently return self class Certificates(object): - """ Object containing certificates of host and provisioned user. """ + def __init__(self, client, xml_text): - logger.verb("Load Certificates.xml") + logger.verbose("Load Certificates.xml") self.client = client self.cert_list = CertList() self.parse(xml_text) @@ -964,7 +999,7 @@ class Certificates(object): data = findtext(xml_doc, "Data") if data is None: return - + cryptutil = CryptUtil(conf.get_openssl_cmd()) p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME) p7m = ("MIME-Version:1.0\n" @@ -975,17 +1010,17 @@ class Certificates(object): "{2}").format(p7m_file, p7m_file, data) self.client.save_cache(p7m_file, p7m) - - trans_prv_file = os.path.join(conf.get_lib_dir(), + + trans_prv_file = os.path.join(conf.get_lib_dir(), TRANSPORT_PRV_FILE_NAME) trans_cert_file = os.path.join(conf.get_lib_dir(), TRANSPORT_CERT_FILE_NAME) pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME) - #decrypt certificates - cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file, - pem_file) + # decrypt certificates + cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file, + pem_file) - #The parsing process use public key to match prv and crt. + # The parsing process use public key to match prv and crt. 
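        # The decrypted PEM bundle interleaves private keys and certificates; the loop
        # below splits it on the BEGIN/END markers, writes each block to a temporary
        # file, and then pairs every private key with its certificate by comparing
        # public keys, renaming the matched files after the certificate thumbprint.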
buf = [] begin_crt = False begin_prv = False @@ -1012,18 +1047,18 @@ class Certificates(object): pub = cryptutil.get_pubkey_from_crt(tmp_file) thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file) thumbprints[pub] = thumbprint - #Rename crt with thumbprint as the file name + # Rename crt with thumbprint as the file name crt = "{0}.crt".format(thumbprint) v1_cert_list.append({ - "name":None, - "thumbprint":thumbprint + "name": None, + "thumbprint": thumbprint }) os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt)) buf = [] index += 1 begin_crt = False - #Rename prv key with thumbprint as the file name + # Rename prv key with thumbprint as the file name for pubkey in prvs: thumbprint = thumbprints[pubkey] if thumbprint: @@ -1037,7 +1072,7 @@ class Certificates(object): self.cert_list.certificates.append(cert) def write_to_tmp_file(self, index, suffix, buf): - file_name = os.path.join(conf.get_lib_dir(), + file_name = os.path.join(conf.get_lib_dir(), "{0}.{1}".format(index, suffix)) self.client.save_cache(file_name, "".join(buf)) return file_name @@ -1050,8 +1085,9 @@ class ExtensionsConfig(object): """ def __init__(self, xml_text): - logger.verb("Load ExtensionsConfig.xml") + logger.verbose("Load ExtensionsConfig.xml") self.ext_handlers = ExtHandlerList() + self.vmagent_manifests = VMAgentManifestList() self.status_upload_blob = None if xml_text is not None: self.parse(xml_text) @@ -1061,6 +1097,21 @@ class ExtensionsConfig(object): Write configuration to file ExtensionsConfig.xml. """ xml_doc = parse_doc(xml_text) + + ga_families_list = find(xml_doc, "GAFamilies") + ga_families = findall(ga_families_list, "GAFamily") + + for ga_family in ga_families: + family = findtext(ga_family, "Name") + uris_list = find(ga_family, "Uris") + uris = findall(uris_list, "Uri") + manifest = VMAgentManifest() + manifest.family = family + for uri in uris: + manifestUri = VMAgentManifestUri(uri=gettext(uri)) + manifest.versionsManifestUris.append(manifestUri) + self.vmagent_manifests.vmAgentManifests.append(manifest) + plugins_list = find(xml_doc, "Plugins") plugins = findall(plugins_list, "Plugin") plugin_settings_list = find(xml_doc, "PluginSettings") @@ -1095,13 +1146,13 @@ class ExtensionsConfig(object): def parse_plugin_settings(self, ext_handler, plugin_settings): if plugin_settings is None: - return + return name = ext_handler.name version = ext_handler.properties.version settings = [x for x in plugin_settings \ - if getattrib(x, "name") == name and \ - getattrib(x ,"version") == version] + if getattrib(x, "name") == name and \ + getattrib(x, "version") == version] if settings is None or len(settings) == 0: return @@ -1119,8 +1170,8 @@ class ExtensionsConfig(object): for plugin_settings_list in runtime_settings["runtimeSettings"]: handler_settings = plugin_settings_list["handlerSettings"] ext = Extension() - #There is no "extension name" in wire protocol. - #Put + # There is no "extension name" in wire protocol. 
+ # Put ext.name = ext_handler.name ext.sequenceNumber = seqNo ext.publicSettings = handler_settings.get("publicSettings") @@ -1129,27 +1180,39 @@ class ExtensionsConfig(object): ext.certificateThumbprint = thumbprint ext_handler.properties.extensions.append(ext) + class ExtensionManifest(object): def __init__(self, xml_text): if xml_text is None: raise ValueError("ExtensionManifest is None") - logger.verb("Load ExtensionManifest.xml") + logger.verbose("Load ExtensionManifest.xml") self.pkg_list = ExtHandlerPackageList() self.parse(xml_text) def parse(self, xml_text): xml_doc = parse_doc(xml_text) - packages = findall(xml_doc, "Plugin") + self._handle_packages(findall(find(xml_doc, "Plugins"), "Plugin"), False) + self._handle_packages(findall(find(xml_doc, "InternalPlugins"), "Plugin"), True) + + def _handle_packages(self, packages, isinternal): for package in packages: version = findtext(package, "Version") + + disallow_major_upgrade = findtext(package, "DisallowMajorVersionUpgrade") + if disallow_major_upgrade is None: + disallow_major_upgrade = '' + disallow_major_upgrade = disallow_major_upgrade.lower() == "true" + uris = find(package, "Uris") uri_list = findall(uris, "Uri") uri_list = [gettext(x) for x in uri_list] - package = ExtHandlerPackage() - package.version = version + pkg = ExtHandlerPackage() + pkg.version = version + pkg.disallow_major_upgrade = disallow_major_upgrade for uri in uri_list: pkg_uri = ExtHandlerVersionUri() pkg_uri.uri = uri - package.uris.append(pkg_uri) - self.pkg_list.versions.append(package) + pkg.uris.append(pkg_uri) + pkg.isinternal = isinternal + self.pkg_list.versions.append(pkg) diff --git a/azurelinuxagent/common/rdma.py b/azurelinuxagent/common/rdma.py new file mode 100644 index 0000000..0c17e38 --- /dev/null +++ b/azurelinuxagent/common/rdma.py @@ -0,0 +1,280 @@ +# Windows Azure Linux Agent +# +# Copyright 2016 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
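The reworked ExtensionManifest.parse() above now walks both <Plugins> and <InternalPlugins>, records a per-package DisallowMajorVersionUpgrade flag, and marks internal packages. A minimal standalone sketch of that shape, using xml.etree rather than the agent's own textutil helpers; the root element name and sample values are invented for illustration:

```python
# Parse a manifest into a list of package dicts. Element names inside
# <Plugins>/<InternalPlugins> follow the diff; everything else is illustrative.
import xml.etree.ElementTree as ET

SAMPLE = """<PluginVersionManifest>
  <Plugins>
    <Plugin>
      <Version>2.1.5</Version>
      <DisallowMajorVersionUpgrade>true</DisallowMajorVersionUpgrade>
      <Uris><Uri>http://example.invalid/pkg.zip</Uri></Uris>
    </Plugin>
  </Plugins>
  <InternalPlugins/>
</PluginVersionManifest>"""

def parse_packages(xml_text):
    root = ET.fromstring(xml_text)
    packages = []
    for group, internal in (("Plugins", False), ("InternalPlugins", True)):
        parent = root.find(group)
        if parent is None:
            continue
        for plugin in parent.findall("Plugin"):
            disallow = plugin.findtext("DisallowMajorVersionUpgrade") or ""
            packages.append({
                "version": plugin.findtext("Version"),
                "disallow_major_upgrade": disallow.lower() == "true",
                "uris": [u.text for u in plugin.findall("Uris/Uri")],
                "isinternal": internal,
            })
    return packages

print(parse_packages(SAMPLE))
```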
+# + +""" +Handle packages and modules to enable RDMA for IB networking +""" + +import os +import re +import time +import threading + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib + + +from azurelinuxagent.common.protocol.wire import SHARED_CONF_FILE_NAME + +dapl_config_paths = [ + '/etc/dat.conf', + '/etc/rdma/dat.conf', + '/usr/local/etc/dat.conf' +] + +def setup_rdma_device(): + logger.verbose("Parsing SharedConfig XML contents for RDMA details") + xml_doc = parse_doc( + fileutil.read_file(os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME))) + if xml_doc is None: + logger.error("Could not parse SharedConfig XML document") + return + instance_elem = find(xml_doc, "Instance") + if not instance_elem: + logger.error("Could not find <Instance> in SharedConfig document") + return + + rdma_ipv4_addr = getattrib(instance_elem, "rdmaIPv4Address") + if not rdma_ipv4_addr: + logger.error( + "Could not find rdmaIPv4Address attribute on Instance element of SharedConfig.xml document") + return + + rdma_mac_addr = getattrib(instance_elem, "rdmaMacAddress") + if not rdma_mac_addr: + logger.error( + "Could not find rdmaMacAddress attribute on Instance element of SharedConfig.xml document") + return + + # add colons to the MAC address (e.g. 00155D33FF1D -> + # 00:15:5D:33:FF:1D) + rdma_mac_addr = ':'.join([rdma_mac_addr[i:i+2] + for i in range(0, len(rdma_mac_addr), 2)]) + logger.info("Found RDMA details. IPv4={0} MAC={1}".format( + rdma_ipv4_addr, rdma_mac_addr)) + + # Set up the RDMA device with collected informatino + RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr).start() + logger.info("RDMA: device is set up") + return + +class RDMAHandler(object): + + driver_module_name = 'hv_network_direct' + + @staticmethod + def get_rdma_version(): + """Retrieve the firmware version information from the system. + This depends on information provided by the Linux kernel.""" + + driver_info_source = '/var/lib/hyperv/.kvp_pool_0' + base_kernel_err_msg = 'Kernel does not provide the necessary ' + base_kernel_err_msg += 'information or the hv_kvp_daemon is not ' + base_kernel_err_msg += 'running.' + if not os.path.isfile(driver_info_source): + error_msg = 'RDMA: Source file "%s" does not exist. ' + error_msg += base_kernel_err_msg + logger.error(error_msg % driver_info_source) + return + + lines = open(driver_info_source).read() + if not lines: + error_msg = 'RDMA: Source file "%s" is empty. ' + error_msg += base_kernel_err_msg + logger.error(error_msg % driver_info_source) + return + + r = re.search("NdDriverVersion\0+(\d\d\d\.\d)", lines) + if r: + NdDriverVersion = r.groups()[0] + return NdDriverVersion + else: + error_msg = 'RDMA: NdDriverVersion not found in "%s"' + logger.error(error_msg % driver_info_source) + return + + def load_driver_module(self): + """Load the kernel driver, this depends on the proper driver + to be installed with the install_driver() method""" + logger.info("RDMA: probing module '%s'" % self.driver_module_name) + result = shellutil.run('modprobe %s' % self.driver_module_name) + if result != 0: + error_msg = 'Could not load "%s" kernel module. 
' + error_msg += 'Run "modprobe %s" as root for more details' + logger.error( + error_msg % (self.driver_module_name, self.driver_module_name) + ) + return + logger.info('RDMA: Loaded the kernel driver successfully.') + return True + + def install_driver(self): + """Install the driver. This is distribution specific and must + be overwritten in the child implementation.""" + logger.error('RDMAHandler.install_driver not implemented') + + def is_driver_loaded(self): + """Check if the network module is loaded in kernel space""" + cmd = 'lsmod | grep ^%s' % self.driver_module_name + status, loaded_modules = shellutil.run_get_output(cmd) + logger.info('RDMA: Checking if the module loaded.') + if loaded_modules: + logger.info('RDMA: module loaded.') + return True + logger.info('RDMA: module not loaded.') + + def reboot_system(self): + """Reboot the system. This is required as the kernel module for + the rdma driver cannot be unloaded with rmmod""" + logger.info('RDMA: Rebooting system.') + ret = shellutil.run('shutdown -r now') + if ret != 0: + logger.error('RDMA: Failed to reboot the system') + + +dapl_config_paths = [ + '/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf'] + +class RDMADeviceHandler(object): + + """ + Responsible for writing RDMA IP and MAC address to the /dev/hvnd_rdma + interface. + """ + + rdma_dev = '/dev/hvnd_rdma' + device_check_timeout_sec = 120 + device_check_interval_sec = 1 + + ipv4_addr = None + mac_adr = None + + def __init__(self, ipv4_addr, mac_addr): + self.ipv4_addr = ipv4_addr + self.mac_addr = mac_addr + + def start(self): + """ + Start a thread in the background to process the RDMA tasks and returns. + """ + logger.info("RDMA: starting device processing in the background.") + threading.Thread(target=self.process).start() + + def process(self): + RDMADeviceHandler.wait_rdma_device( + self.rdma_dev, self.device_check_timeout_sec, self.device_check_interval_sec) + RDMADeviceHandler.update_dat_conf(dapl_config_paths, self.ipv4_addr) + RDMADeviceHandler.write_rdma_config_to_device( + self.rdma_dev, self.ipv4_addr, self.mac_addr) + RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr) + + @staticmethod + def update_dat_conf(paths, ipv4_addr): + """ + Looks at paths for dat.conf file and updates the ip address for the + infiniband interface. 
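Two small parsing steps from the RDMA support above, restated as a runnable sketch: pulling NdDriverVersion out of the Hyper-V KVP pool file (/var/lib/hyperv/.kvp_pool_0) and re-inserting colons into the bare MAC address taken from SharedConfig.xml. The regex and the colon logic mirror the diff; the sample data is made up.

```python
import re

def nd_driver_version(kvp_text):
    # KVP records are NUL-padded; the version looks like "140.0".
    # Same pattern as the diff, written as a raw string.
    m = re.search(r"NdDriverVersion\0+(\d\d\d\.\d)", kvp_text)
    return m.group(1) if m else None

def add_colons(mac):
    # 00155D33FF1D -> 00:15:5D:33:FF:1D
    return ':'.join(mac[i:i + 2] for i in range(0, len(mac), 2))

sample = "junk\x00NdDriverVersion\x00\x00140.0\x00tail"
print(nd_driver_version(sample))       # 140.0
print(add_colons("00155D33FF1D"))      # 00:15:5D:33:FF:1D
```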
+ """ + logger.info("Updating DAPL configuration file") + for f in paths: + logger.info("RDMA: trying {0}".format(f)) + if not os.path.isfile(f): + logger.info( + "RDMA: DAPL config not found at {0}".format(f)) + continue + logger.info("RDMA: DAPL config is at: {0}".format(f)) + cfg = fileutil.read_file(f) + new_cfg = RDMADeviceHandler.replace_dat_conf_contents( + cfg, ipv4_addr) + fileutil.write_file(f, new_cfg) + logger.info("RDMA: DAPL configuration is updated") + return + + raise Exception("RDMA: DAPL configuration file not found at predefined paths") + + @staticmethod + def replace_dat_conf_contents(cfg, ipv4_addr): + old = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"\S+ 0\"" + new = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"{0} 0\"".format( + ipv4_addr) + return re.sub(old, new, cfg) + + @staticmethod + def write_rdma_config_to_device(path, ipv4_addr, mac_addr): + data = RDMADeviceHandler.generate_rdma_config(ipv4_addr, mac_addr) + logger.info( + "RDMA: Updating device with configuration: {0}".format(data)) + with open(path, "w") as f: + f.write(data) + logger.info("RDMA: Updated device with IPv4/MAC addr successfully") + + @staticmethod + def generate_rdma_config(ipv4_addr, mac_addr): + return 'rdmaMacAddress="{0}" rdmaIPv4Address="{1}"'.format(mac_addr, ipv4_addr) + + @staticmethod + def wait_rdma_device(path, timeout_sec, check_interval_sec): + logger.info("RDMA: waiting for device={0} timeout={1}s".format(path, timeout_sec)) + total_retries = timeout_sec/check_interval_sec + n = 0 + while n < total_retries: + if os.path.exists(path): + logger.info("RDMA: device ready") + return + logger.verbose( + "RDMA: device not ready, sleep {0}s".format(check_interval_sec)) + time.sleep(check_interval_sec) + n += 1 + logger.error("RDMA device wait timed out") + raise Exception("The device did not show up in {0} seconds ({1} retries)".format( + timeout_sec, total_retries)) + + @staticmethod + def update_network_interface(mac_addr, ipv4_addr): + netmask=16 + + logger.info("RDMA: will update the network interface with IPv4/MAC") + + if_name=RDMADeviceHandler.get_interface_by_mac(mac_addr) + logger.info("RDMA: network interface found: {0}", if_name) + logger.info("RDMA: bringing network interface up") + if shellutil.run("ifconfig {0} up".format(if_name)) != 0: + raise Exception("Could not bring up RMDA interface: {0}".format(if_name)) + + logger.info("RDMA: configuring IPv4 addr and netmask on interface") + addr = '{0}/{1}'.format(ipv4_addr, netmask) + if shellutil.run("ifconfig {0} {1}".format(if_name, addr)) != 0: + raise Exception("Could set addr to {1} on {0}".format(if_name, addr)) + logger.info("RDMA: network address and netmask configured on interface") + + @staticmethod + def get_interface_by_mac(mac): + ret, output = shellutil.run_get_output("ifconfig -a") + if ret != 0: + raise Exception("Failed to list network interfaces") + output = output.replace('\n', '') + match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac), + output, re.IGNORECASE) + if match is None: + raise Exception("Failed to get ifname with mac: {0}".format(mac)) + output = match.group(0) + eths = re.findall(r"eth\d", output) + if eths is None or len(eths) == 0: + raise Exception("ifname with mac: {0} not found".format(mac)) + return eths[-1] diff --git a/azurelinuxagent/distro/ubuntu/__init__.py b/azurelinuxagent/common/utils/__init__.py index d9b82f5..1ea2f38 100644 --- a/azurelinuxagent/distro/ubuntu/__init__.py +++ b/azurelinuxagent/common/utils/__init__.py @@ -1,5 
+1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/azurelinuxagent/utils/cryptutil.py b/azurelinuxagent/common/utils/cryptutil.py index 5ee5637..b35bda0 100644 --- a/azurelinuxagent/utils/cryptutil.py +++ b/azurelinuxagent/common/utils/cryptutil.py @@ -19,9 +19,9 @@ import base64 import struct -from azurelinuxagent.future import ustr, bytebuffer -from azurelinuxagent.exception import CryptError -import azurelinuxagent.utils.shellutil as shellutil +from azurelinuxagent.common.future import ustr, bytebuffer +from azurelinuxagent.common.exception import CryptError +import azurelinuxagent.common.utils.shellutil as shellutil class CryptUtil(object): def __init__(self, openssl_cmd): diff --git a/azurelinuxagent/utils/fileutil.py b/azurelinuxagent/common/utils/fileutil.py index 5369a7c..7ef4fef 100644 --- a/azurelinuxagent/utils/fileutil.py +++ b/azurelinuxagent/common/utils/fileutil.py @@ -26,9 +26,16 @@ import re import shutil import pwd import tempfile -import azurelinuxagent.logger as logger -from azurelinuxagent.future import ustr -import azurelinuxagent.utils.textutil as textutil +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.utils.textutil as textutil + +def copy_file(from_path, to_path=None, to_dir=None): + if to_path is None: + to_path = os.path.join(to_dir, os.path.basename(from_path)) + shutil.copyfile(from_path, to_path) + return to_path + def read_file(filepath, asbin=False, remove_bom=False, encoding='utf-8'): """ @@ -66,40 +73,6 @@ def append_file(filepath, contents, asbin=False, encoding='utf-8'): """ write_file(filepath, contents, asbin=asbin, encoding=encoding, append=True) -def replace_file(filepath, contents): - """ - Write 'contents' to 'filepath' by creating a temp file, - and replacing original. - """ - handle, temp = tempfile.mkstemp(dir=os.path.dirname(filepath)) - #if type(contents) == str: - #contents=contents.encode('latin-1') - try: - os.write(handle, contents) - except IOError as err: - logger.error('Write to file {0}, Exception is {1}', filepath, err) - return 1 - finally: - os.close(handle) - - try: - os.rename(temp, filepath) - except IOError as err: - logger.info('Rename {0} to {1}, Exception is {2}', temp, filepath, err) - logger.info('Remove original file and retry') - try: - os.remove(filepath) - except IOError as err: - logger.error('Remove {0}, Exception is {1}', temp, filepath, err) - - try: - os.rename(temp, filepath) - except IOError as err: - logger.error('Rename {0} to {1}, Exception is {2}', temp, filepath, - err) - return 1 - return 0 - def base_name(path): head, tail = os.path.split(path) @@ -125,11 +98,17 @@ def mkdir(dirpath, mode=None, owner=None): chowner(dirpath, owner) def chowner(path, owner): - owner_info = pwd.getpwnam(owner) - os.chown(path, owner_info[2], owner_info[3]) + if not os.path.exists(path): + logger.error("Path does not exist: {0}".format(path)) + else: + owner_info = pwd.getpwnam(owner) + os.chown(path, owner_info[2], owner_info[3]) def chmod(path, mode): - os.chmod(path, mode) + if not os.path.exists(path): + logger.error("Path does not exist: {0}".format(path)) + else: + os.chmod(path, mode) def rm_files(*args): for path in args: @@ -149,6 +128,11 @@ def rm_dirs(*args): elif os.path.isdir(path): shutil.rmtree(path) +def trim_ext(path, ext): + if not ext.startswith("."): + ext = "." 
+ ext + return path.split(ext)[0] if path.endswith(ext) else path + def update_conf_file(path, line_start, val, chk_err=False): conf = [] if not os.path.isfile(path) and chk_err: @@ -156,7 +140,7 @@ def update_conf_file(path, line_start, val, chk_err=False): conf = read_file(path).split('\n') conf = [x for x in conf if not x.startswith(line_start)] conf.append(val) - replace_file(path, '\n'.join(conf)) + write_file(path, '\n'.join(conf)) def search_file(target_dir_name, target_file_name): for root, dirs, files in os.walk(target_dir_name): diff --git a/azurelinuxagent/common/utils/flexible_version.py b/azurelinuxagent/common/utils/flexible_version.py new file mode 100644 index 0000000..2fce88d --- /dev/null +++ b/azurelinuxagent/common/utils/flexible_version.py @@ -0,0 +1,199 @@ +from distutils import version +import re + +class FlexibleVersion(version.Version): + """ + A more flexible implementation of distutils.version.StrictVersion + + The implementation allows to specify: + - an arbitrary number of version numbers: + not only '1.2.3' , but also '1.2.3.4.5' + - the separator between version numbers: + '1-2-3' is allowed when '-' is specified as separator + - a flexible pre-release separator: + '1.2.3.alpha1', '1.2.3-alpha1', and '1.2.3alpha1' are considered equivalent + - an arbitrary ordering of pre-release tags: + 1.1alpha3 < 1.1beta2 < 1.1rc1 < 1.1 + when ["alpha", "beta", "rc"] is specified as pre-release tag list + + Inspiration from this discussion at StackOverflow: + http://stackoverflow.com/questions/12255554/sort-versions-in-python + """ + + def __init__(self, vstring=None, sep='.', prerel_tags=('alpha', 'beta', 'rc')): + version.Version.__init__(self) + + if sep is None: + sep = '.' + if prerel_tags is None: + prerel_tags = () + + self.sep = sep + self.prerel_sep = '' + self.prerel_tags = tuple(prerel_tags) if prerel_tags is not None else () + + self._compile_pattern() + + self.prerelease = None + self.version = () + if vstring: + self._parse(vstring) + return + + _nn_version = 'version' + _nn_prerel_sep = 'prerel_sep' + _nn_prerel_tag = 'tag' + _nn_prerel_num = 'tag_num' + + _re_prerel_sep = r'(?P<{pn}>{sep})?'.format( + pn=_nn_prerel_sep, + sep='|'.join(map(re.escape, ('.', '-')))) + + @property + def major(self): + return self.version[0] if len(self.version) > 0 else 0 + + @property + def minor(self): + return self.version[1] if len(self.version) > 1 else 0 + + @property + def patch(self): + return self.version[2] if len(self.version) > 2 else 0 + + def _parse(self, vstring): + m = self.version_re.match(vstring) + if not m: + raise ValueError("Invalid version number '{0}'".format(vstring)) + + self.prerelease = None + self.version = () + + self.prerel_sep = m.group(self._nn_prerel_sep) + tag = m.group(self._nn_prerel_tag) + tag_num = m.group(self._nn_prerel_num) + + if tag is not None and tag_num is not None: + self.prerelease = (tag, int(tag_num) if len(tag_num) else None) + + self.version = tuple(map(int, self.sep_re.split(m.group(self._nn_version)))) + return + + def __add__(self, increment): + version = list(self.version) + version[-1] += increment + vstring = self._assemble(version, self.sep, self.prerel_sep, self.prerelease) + return FlexibleVersion(vstring=vstring, sep=self.sep, prerel_tags=self.prerel_tags) + + def __sub__(self, decrement): + version = list(self.version) + if version[-1] <= 0: + raise ArithmeticError("Cannot decrement final numeric component of {0} below zero" \ + .format(self)) + version[-1] -= decrement + vstring = self._assemble(version, self.sep, 
self.prerel_sep, self.prerelease) + return FlexibleVersion(vstring=vstring, sep=self.sep, prerel_tags=self.prerel_tags) + + def __repr__(self): + return "{cls} ('{vstring}', '{sep}', {prerel_tags})"\ + .format( + cls=self.__class__.__name__, + vstring=str(self), + sep=self.sep, + prerel_tags=self.prerel_tags) + + def __str__(self): + return self._assemble(self.version, self.sep, self.prerel_sep, self.prerelease) + + def __ge__(self, that): + return not self.__lt__(that) + + def __gt__(self, that): + return (not self.__lt__(that)) and (not self.__eq__(that)) + + def __le__(self, that): + return (self.__lt__(that)) or (self.__eq__(that)) + + def __lt__(self, that): + this_version, that_version = self._ensure_compatible(that) + + if this_version != that_version \ + or self.prerelease is None and that.prerelease is None: + return this_version < that_version + + if self.prerelease is not None and that.prerelease is None: + return True + if self.prerelease is None and that.prerelease is not None: + return False + + this_index = self.prerel_tags_set[self.prerelease[0]] + that_index = self.prerel_tags_set[that.prerelease[0]] + if this_index == that_index: + return self.prerelease[1] < that.prerelease[1] + + return this_index < that_index + + def __ne__(self, that): + return not self.__eq__(that) + + def __eq__(self, that): + this_version, that_version = self._ensure_compatible(that) + + if this_version != that_version: + return False + + if self.prerelease != that.prerelease: + return False + + return True + + def _assemble(self, version, sep, prerel_sep, prerelease): + s = sep.join(map(str, version)) + if prerelease is not None: + if prerel_sep is not None: + s += prerel_sep + s += prerelease[0] + if prerelease[1] is not None: + s += str(prerelease[1]) + return s + + def _compile_pattern(self): + sep, self.sep_re = self._compile_separator(self.sep) + + if self.prerel_tags: + tags = '|'.join(re.escape(tag) for tag in self.prerel_tags) + self.prerel_tags_set = dict(zip(self.prerel_tags, range(len(self.prerel_tags)))) + release_re = '(?:{prerel_sep}(?P<{tn}>{tags})(?P<{nn}>\d*))?'.format( + prerel_sep=self._re_prerel_sep, + tags=tags, + tn=self._nn_prerel_tag, + nn=self._nn_prerel_num) + else: + release_re = '' + + version_re = r'^(?P<{vn}>\d+(?:(?:{sep}\d+)*)?){rel}$'.format( + vn=self._nn_version, + sep=sep, + rel=release_re) + self.version_re = re.compile(version_re) + return + + def _compile_separator(self, sep): + if sep is None: + return '', re.compile('') + return re.escape(sep), re.compile(re.escape(sep)) + + def _ensure_compatible(self, that): + """ + Ensures the instances have the same structure and, if so, returns length compatible + version lists (so that x.y.0.0 is equivalent to x.y). 
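A short usage sketch for the FlexibleVersion class being added here (the import path matches the new file; the version strings are arbitrary examples). It shows the prerelease ordering and the zero-padded comparison the logic above implements:

```python
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion

# Prerelease tags order as alpha < beta < rc < final release.
assert FlexibleVersion("1.1alpha3") < FlexibleVersion("1.1beta2") < \
       FlexibleVersion("1.1rc1") < FlexibleVersion("1.1")

# Shorter versions are zero-padded for comparison, and + bumps the last field.
assert FlexibleVersion("2.1.5") == FlexibleVersion("2.1.5.0.0")
assert (FlexibleVersion("2.1.5") + 1) == FlexibleVersion("2.1.6")

v = FlexibleVersion("2.1.5")
print(v.major, v.minor, v.patch)   # 2 1 5
```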
+ """ + if self.prerel_tags != that.prerel_tags or self.sep != that.sep: + raise ValueError("Unable to compare: versions have different structures") + + this_version = list(self.version[:]) + that_version = list(that.version[:]) + while len(this_version) < len(that_version): this_version.append(0) + while len(that_version) < len(this_version): that_version.append(0) + + return this_version, that_version diff --git a/azurelinuxagent/utils/restutil.py b/azurelinuxagent/common/utils/restutil.py index 2e8b0be..a789650 100644 --- a/azurelinuxagent/utils/restutil.py +++ b/azurelinuxagent/common/utils/restutil.py @@ -21,10 +21,10 @@ import time import platform import os import subprocess -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -from azurelinuxagent.exception import HttpError -from azurelinuxagent.future import httpclient, urlparse +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import HttpError +from azurelinuxagent.common.future import httpclient, urlparse """ REST api util functions @@ -87,9 +87,9 @@ def http_request(method, url, data, headers=None, max_retry=3, chk_proxy=False): Sending http request to server On error, sleep 10 and retry max_retry times. """ - logger.verb("HTTP Req: {0} {1}", method, url) - logger.verb(" Data={0}", data) - logger.verb(" Header={0}", headers) + logger.verbose("HTTP Req: {0} {1}", method, url) + logger.verbose(" Data={0}", data) + logger.verbose(" Header={0}", headers) host, port, secure, rel_uri = _parse_url(url) #Check proxy @@ -115,8 +115,8 @@ def http_request(method, url, data, headers=None, max_retry=3, chk_proxy=False): resp = _http_request(method, host, rel_uri, port=port, data=data, secure=secure, headers=headers, proxy_host=proxy_host, proxy_port=proxy_port) - logger.verb("HTTP Resp: Status={0}", resp.status) - logger.verb(" Header={0}", resp.getheaders()) + logger.verbose("HTTP Resp: Status={0}", resp.status) + logger.verbose(" Header={0}", resp.getheaders()) return resp except httpclient.HTTPException as e: logger.warn('HTTPException {0}, args:{1}', e, repr(e.args)) diff --git a/azurelinuxagent/utils/shellutil.py b/azurelinuxagent/common/utils/shellutil.py index 98871a1..d273c92 100644 --- a/azurelinuxagent/utils/shellutil.py +++ b/azurelinuxagent/common/utils/shellutil.py @@ -20,8 +20,8 @@ import platform import os import subprocess -from azurelinuxagent.future import ustr -import azurelinuxagent.logger as logger +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.logger as logger if not hasattr(subprocess,'check_output'): def check_output(*popenargs, **kwargs): @@ -72,7 +72,7 @@ def run_get_output(cmd, chk_err=True, log_cmd=True): Reports exceptions to Error if chk_err parameter is True """ if log_cmd: - logger.verb(u"run cmd '{0}'", cmd) + logger.verbose(u"run cmd '{0}'", cmd) try: output=subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True) output = ustr(output, encoding='utf-8', errors="backslashreplace") @@ -86,4 +86,22 @@ def run_get_output(cmd, chk_err=True, log_cmd=True): return e.returncode, output return 0, output -#End shell command util functions + +def quote(word_list): + """ + Quote a list or tuple of strings for Unix Shell as words, using the + byte-literal single quote. + + The resulting string is safe for use with ``shell=True`` in ``subprocess``, + and in ``os.system``. ``assert shlex.split(ShellQuote(wordList)) == wordList``. 
+ + See POSIX.1:2013 Vol 3, Chap 2, Sec 2.2.2: + http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02_02 + """ + if not isinstance(word_list, (tuple, list)): + word_list = (word_list,) + + return " ".join(list("'{0}'".format(s.replace("'", "'\\''")) for s in word_list)) + + +# End shell command util functions diff --git a/azurelinuxagent/utils/textutil.py b/azurelinuxagent/common/utils/textutil.py index 851f98a..f03c7e6 100644 --- a/azurelinuxagent/utils/textutil.py +++ b/azurelinuxagent/common/utils/textutil.py @@ -16,23 +16,27 @@ # # Requires Python 2.4+ and Openssl 1.0+ +import base64 import crypt import random import string import struct -import xml.dom.minidom as minidom import sys -from distutils.version import LooseVersion +import xml.dom.minidom as minidom + +from distutils.version import LooseVersion as Version + def parse_doc(xml_text): """ Parse xml document from string """ - #The minidom lib has some issue with unicode in python2. - #Encode the string into utf-8 first + # The minidom lib has some issue with unicode in python2. + # Encode the string into utf-8 first xml_text = xml_text.encode('utf-8') return minidom.parseString(xml_text) + def findall(root, tag, namespace=None): """ Get all nodes by tag and namespace under Node root. @@ -45,6 +49,7 @@ def findall(root, tag, namespace=None): else: return root.getElementsByTagNameNS(namespace, tag) + def find(root, tag, namespace=None): """ Get first node by tag and namespace under Node root. @@ -55,18 +60,20 @@ def find(root, tag, namespace=None): else: return None + def gettext(node): """ Get node text """ if node is None: return None - + for child in node.childNodes: if child.nodeType == child.TEXT_NODE: return child.data return None + def findtext(root, tag, namespace=None): """ Get text of node by tag and namespace under Node root. @@ -74,6 +81,7 @@ def findtext(root, tag, namespace=None): node = find(root, tag, namespace=namespace) return gettext(node) + def getattrib(node, attr_name): """ Get attribute of xml node @@ -83,6 +91,7 @@ def getattrib(node, attr_name): else: return None + def unpack(buf, offset, range): """ Unpack bytes into python values. @@ -92,43 +101,50 @@ def unpack(buf, offset, range): result = (result << 8) | str_to_ord(buf[offset + i]) return result + def unpack_little_endian(buf, offset, length): """ Unpack little endian bytes into python values. """ return unpack(buf, offset, list(range(length - 1, -1, -1))) + def unpack_big_endian(buf, offset, length): """ Unpack big endian bytes into python values. """ return unpack(buf, offset, list(range(0, length))) + def hex_dump3(buf, offset, length): """ Dump range of buf in formatted hex. """ return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]]) + def hex_dump2(buf): """ Dump buf in formatted hex. """ return hex_dump3(buf, 0, len(buf)) + def is_in_range(a, low, high): """ Return True if 'a' in 'low' <= a >= 'high' """ return (a >= low and a <= high) + def is_printable(ch): """ Return True if character is displayable. 
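The quote() helper added to shellutil above wraps each word in single quotes and escapes embedded quotes, so splitting the result recovers the original words. A self-contained restatement with a quick shlex round-trip check (the sample words are arbitrary):

```python
import shlex

def quote(word_list):
    # Same single-quote escaping as the diff: ' becomes '\'' inside a word.
    if not isinstance(word_list, (tuple, list)):
        word_list = (word_list,)
    return " ".join("'{0}'".format(s.replace("'", "'\\''")) for s in word_list)

words = ["echo", "it's a file", "$HOME is not expanded"]
cmd = quote(words)
assert shlex.split(cmd) == words   # round-trips the original word list
print(cmd)
```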
""" return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z')) - or is_in_range(ch, str_to_ord('a'), str_to_ord('z')) - or is_in_range(ch, str_to_ord('0'), str_to_ord('9'))) + or is_in_range(ch, str_to_ord('a'), str_to_ord('z')) + or is_in_range(ch, str_to_ord('0'), str_to_ord('9'))) + def hex_dump(buffer, size): """ @@ -155,7 +171,7 @@ def hex_dump(buffer, size): j += 1 result += " " for j in range(i - (i % 16), i + 1): - byte=buffer[j] + byte = buffer[j] if type(byte) == str: byte = str_to_ord(byte.decode('latin1')) k = '.' @@ -166,6 +182,7 @@ def hex_dump(buffer, size): result += "\n" return result + def str_to_ord(a): """ Allows indexing into a string or an array of integers transparently. @@ -175,12 +192,14 @@ def str_to_ord(a): a = ord(a) return a + def compare_bytes(a, b, start, length): for offset in range(start, start + length): if str_to_ord(a[offset]) != str_to_ord(b[offset]): return False return True + def int_to_ip4_addr(a): """ Build DHCP request string. @@ -190,6 +209,7 @@ def int_to_ip4_addr(a): (a >> 8) & 0xFF, (a) & 0xFF) + def hexstr_to_bytearray(a): """ Return hex string packed into a binary struct. @@ -199,6 +219,7 @@ def hexstr_to_bytearray(a): b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16)) return b + def set_ssh_config(config, name, val): notfound = True for i in range(0, len(config)): @@ -206,24 +227,43 @@ def set_ssh_config(config, name, val): config[i] = "{0} {1}".format(name, val) notfound = False elif config[i].startswith("Match"): - #Match block must be put in the end of sshd config + # Match block must be put in the end of sshd config break if notfound: config.insert(i, "{0} {1}".format(name, val)) return config + +def set_ini_config(config, name, val): + notfound = True + nameEqual = name + '=' + length = len(config) + text = "{0}=\"{1}\"".format(name, val) + + for i in reversed(range(0, length)): + if config[i].startswith(nameEqual): + config[i] = text + notfound = False + break + + if notfound: + config.insert(length - 1, text) + + def remove_bom(c): if str_to_ord(c[0]) > 128 and str_to_ord(c[1]) > 128 and \ - str_to_ord(c[2]) > 128: + str_to_ord(c[2]) > 128: c = c[3:] return c + def gen_password_hash(password, crypt_id, salt_len): collection = string.ascii_letters + string.digits salt = ''.join(random.choice(collection) for _ in range(salt_len)) salt = "${0}${1}".format(crypt_id, salt) return crypt.crypt(password, salt) + def get_bytes_from_pem(pem_str): base64_bytes = "" for line in pem_str.split('\n'): @@ -232,5 +272,8 @@ def get_bytes_from_pem(pem_str): return base64_bytes -Version = LooseVersion - +def b64encode(s): + from azurelinuxagent.common.version import PY_VERSION_MAJOR + if PY_VERSION_MAJOR > 2: + return base64.b64encode(bytes(s, 'utf-8')).decode('utf-8') + return base64.b64encode(s) diff --git a/azurelinuxagent/metadata.py b/azurelinuxagent/common/version.py index 34fdcf9..6c4b475 100644 --- a/azurelinuxagent/metadata.py +++ b/azurelinuxagent/common/version.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -21,33 +19,38 @@ import os import re import platform import sys -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.future import ustr + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion +from azurelinuxagent.common.future import ustr + def get_distro(): if 'FreeBSD' in 
platform.system(): release = re.sub('\-.*\Z', '', ustr(platform.release())) osinfo = ['freebsd', release, '', 'freebsd'] - if 'linux_distribution' in dir(platform): + elif 'linux_distribution' in dir(platform): osinfo = list(platform.linux_distribution(full_distribution_name=0)) full_name = platform.linux_distribution()[0].strip() osinfo.append(full_name) else: osinfo = platform.dist() - #The platform.py lib has issue with detecting oracle linux distribution. - #Merge the following patch provided by oracle as a temparory fix. + # The platform.py lib has issue with detecting oracle linux distribution. + # Merge the following patch provided by oracle as a temparory fix. if os.path.exists("/etc/oracle-release"): osinfo[2] = "oracle" osinfo[3] = "Oracle Linux" - #Remove trailing whitespace and quote in distro name + # Remove trailing whitespace and quote in distro name osinfo[0] = osinfo[0].strip('"').strip(' ').lower() return osinfo + AGENT_NAME = "WALinuxAgent" AGENT_LONG_NAME = "Azure Linux Agent" -AGENT_VERSION = '2.1.3' +AGENT_VERSION = '2.1.5' AGENT_LONG_VERSION = "{0}-{1}".format(AGENT_NAME, AGENT_VERSION) AGENT_DESCRIPTION = """\ The Azure Linux Agent supports the provisioning and running of Linux @@ -55,6 +58,35 @@ VMs in the Azure cloud. This package should be installed on Linux disk images that are built to run in the Azure environment. """ +AGENT_DIR_GLOB = "{0}-*".format(AGENT_NAME) +AGENT_PKG_GLOB = "{0}-*.zip".format(AGENT_NAME) + +AGENT_PATTERN = "{0}-(.*)".format(AGENT_NAME) +AGENT_NAME_PATTERN = re.compile(AGENT_PATTERN) +AGENT_DIR_PATTERN = re.compile(".*/{0}".format(AGENT_PATTERN)) + + +# Set the CURRENT_AGENT and CURRENT_VERSION to match the agent directory name +# - This ensures the agent will "see itself" using the same name and version +# as the code that downloads agents. +def set_current_agent(): + path = os.getcwd() + lib_dir = conf.get_lib_dir() + if lib_dir[-1] != os.path.sep: + lib_dir += os.path.sep + if path[:len(lib_dir)] != lib_dir: + agent = AGENT_LONG_VERSION + version = AGENT_VERSION + else: + agent = path[len(lib_dir):].split(os.path.sep)[0] + version = AGENT_NAME_PATTERN.match(agent).group(1) + return agent, FlexibleVersion(version) +CURRENT_AGENT, CURRENT_VERSION = set_current_agent() + +def is_current_agent_installed(): + return CURRENT_AGENT == AGENT_LONG_VERSION + + __distro__ = get_distro() DISTRO_NAME = __distro__[0] DISTRO_VERSION = __distro__[1] @@ -66,11 +98,12 @@ PY_VERSION_MAJOR = sys.version_info[0] PY_VERSION_MINOR = sys.version_info[1] PY_VERSION_MICRO = sys.version_info[2] - """ -Add this walk arround for detecting Snappy Ubuntu Core temporarily, until ubuntu +Add this workaround for detecting Snappy Ubuntu Core temporarily, until ubuntu fixed this bug: https://bugs.launchpad.net/snappy/+bug/1481086 """ + + def is_snappy(): if os.path.exists("/etc/motd"): motd = fileutil.read_file("/etc/motd") @@ -78,5 +111,6 @@ def is_snappy(): return True return False + if is_snappy(): DISTRO_FULL_NAME = "Snappy Ubuntu Core" diff --git a/azurelinuxagent/daemon/__init__.py b/azurelinuxagent/daemon/__init__.py new file mode 100644 index 0000000..979e01b --- /dev/null +++ b/azurelinuxagent/daemon/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
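set_current_agent() above decides whether the process is running out of a downloaded agent directory by comparing os.getcwd() against the lib dir and, if so, recovers the version from that directory's name. A simplified sketch of just the name-to-version step; the lib dir path is illustrative, and the real code also wraps the result in FlexibleVersion:

```python
import os
import re

AGENT_NAME = "WALinuxAgent"
AGENT_NAME_PATTERN = re.compile("{0}-(.*)".format(AGENT_NAME))

def agent_version_from_path(path, lib_dir="/var/lib/waagent"):
    # lib_dir is an illustrative default, not read from conf here.
    lib_dir = lib_dir.rstrip(os.path.sep) + os.path.sep
    if not path.startswith(lib_dir):
        return None   # running from the installed package, not a downloaded agent
    agent_dir = path[len(lib_dir):].split(os.path.sep)[0]
    m = AGENT_NAME_PATTERN.match(agent_dir)
    return m.group(1) if m else None

print(agent_version_from_path("/var/lib/waagent/WALinuxAgent-2.1.5/bin"))  # 2.1.5
print(agent_version_from_path("/usr/share/something"))                      # None
```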
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.daemon.main import get_daemon_handler diff --git a/azurelinuxagent/daemon/main.py b/azurelinuxagent/daemon/main.py new file mode 100644 index 0000000..d3185a1 --- /dev/null +++ b/azurelinuxagent/daemon/main.py @@ -0,0 +1,130 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import sys +import time +import traceback + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.event as event +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.logger as logger + +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.event import add_event, WALAEventOperation +from azurelinuxagent.common.exception import ProtocolError +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.common.rdma import RDMADeviceHandler, setup_rdma_device +from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib +from azurelinuxagent.common.version import AGENT_LONG_NAME, AGENT_VERSION, \ + DISTRO_NAME, DISTRO_VERSION, \ + DISTRO_FULL_NAME, PY_VERSION_MAJOR, \ + PY_VERSION_MINOR, PY_VERSION_MICRO +from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler +from azurelinuxagent.daemon.scvmm import get_scvmm_handler +from azurelinuxagent.pa.provision import get_provision_handler +from azurelinuxagent.pa.rdma import get_rdma_handler +from azurelinuxagent.ga.update import get_update_handler + +def get_daemon_handler(): + return DaemonHandler() + +class DaemonHandler(object): + """ + Main thread of daemon. 
It will invoke other threads to do actual work + """ + def __init__(self): + self.running = True + self.osutil = get_osutil() + + def run(self): + logger.info("{0} Version:{1}", AGENT_LONG_NAME, AGENT_VERSION) + logger.info("OS: {0} {1}", DISTRO_NAME, DISTRO_VERSION) + logger.info("Python: {0}.{1}.{2}", PY_VERSION_MAJOR, PY_VERSION_MINOR, + PY_VERSION_MICRO) + + self.check_pid() + + while self.running: + try: + self.daemon() + except Exception as e: + err_msg = traceback.format_exc() + add_event("WALA", is_success=False, message=ustr(err_msg), + op=WALAEventOperation.UnhandledError) + logger.info("Sleep 15 seconds and restart daemon") + time.sleep(15) + + + def check_pid(self): + """Check whether daemon is already running""" + pid = None + pid_file = conf.get_agent_pid_file_path() + if os.path.isfile(pid_file): + pid = fileutil.read_file(pid_file) + + if self.osutil.check_pid_alive(pid): + logger.info("Daemon is already running: {0}", pid) + sys.exit(0) + + fileutil.write_file(pid_file, ustr(os.getpid())) + + def daemon(self): + logger.info("Run daemon") + + self.protocol_util = get_protocol_util() + self.scvmm_handler = get_scvmm_handler() + self.resourcedisk_handler = get_resourcedisk_handler() + self.rdma_handler = get_rdma_handler() + self.provision_handler = get_provision_handler() + self.update_handler = get_update_handler() + + # Create lib dir + if not os.path.isdir(conf.get_lib_dir()): + fileutil.mkdir(conf.get_lib_dir(), mode=0o700) + os.chdir(conf.get_lib_dir()) + + if conf.get_detect_scvmm_env(): + self.scvmm_handler.run() + + if conf.get_resourcedisk_format(): + self.resourcedisk_handler.run() + + # Always redetermine the protocol start (e.g., wireserver vs. + # on-premise) since a VHD can move between environments + self.protocol_util.clear_protocol() + + self.provision_handler.run() + + # Enable RDMA, continue in errors + if conf.enable_rdma(): + self.rdma_handler.install_driver() + + logger.info("RDMA capabilities are enabled in configuration") + try: + setup_rdma_device() + except Exception as e: + logger.error("Error setting up rdma device: %s" % e) + else: + logger.info("RDMA capabilities are not enabled, skipping") + + while self.running: + self.update_handler.run_latest() diff --git a/azurelinuxagent/daemon/resourcedisk/__init__.py b/azurelinuxagent/daemon/resourcedisk/__init__.py new file mode 100644 index 0000000..021cecd --- /dev/null +++ b/azurelinuxagent/daemon/resourcedisk/__init__.py @@ -0,0 +1,20 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
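DaemonHandler.check_pid() above implements the usual single-instance guard: read the pid file, exit if that pid is still alive, otherwise record our own pid. A minimal sketch; the /proc liveness test stands in for osutil.check_pid_alive(), whose implementation is not part of this diff, and the pid file path is illustrative:

```python
import os
import sys

PID_FILE = "/var/run/waagent.pid"   # illustrative path

def check_pid(pid_file=PID_FILE):
    if os.path.isfile(pid_file):
        with open(pid_file) as f:
            pid = f.read().strip()
        # Stand-in liveness check: a /proc entry means the pid is running.
        if pid and os.path.isdir(os.path.join("/proc", pid)):
            print("Daemon is already running: {0}".format(pid))
            sys.exit(0)
    with open(pid_file, "w") as f:
        f.write(str(os.getpid()))
```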
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.daemon.resourcedisk.factory import get_resourcedisk_handler diff --git a/azurelinuxagent/distro/default/resourceDisk.py b/azurelinuxagent/daemon/resourcedisk/default.py index a6c5232..d2e400a 100644 --- a/azurelinuxagent/distro/default/resourceDisk.py +++ b/azurelinuxagent/daemon/resourcedisk/default.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -19,14 +17,16 @@ import os import re +import sys import threading -import azurelinuxagent.logger as logger -from azurelinuxagent.future import ustr -import azurelinuxagent.conf as conf -from azurelinuxagent.event import add_event, WALAEventOperation -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -from azurelinuxagent.exception import ResourceDiskError +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.event import add_event, WALAEventOperation +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.exception import ResourceDiskError +from azurelinuxagent.common.osutil import get_osutil DATALOSS_WARNING_FILE_NAME="DATALOSS_WARNING_README.txt" DATA_LOSS_WARNING="""\ @@ -40,8 +40,8 @@ For additional details to please refer to the MSDN documentation at : http://msd """ class ResourceDiskHandler(object): - def __init__(self, distro): - self.distro = distro + def __init__(self): + self.osutil = get_osutil() def start_activate_resource_disk(self): disk_thread = threading.Thread(target = self.run) @@ -81,13 +81,13 @@ class ResourceDiskHandler(object): logger.error("Failed to enable swap {0}", e) def mount_resource_disk(self, mount_point, fs): - device = self.distro.osutil.device_for_ide_port(1) + device = self.osutil.device_for_ide_port(1) if device is None: raise ResourceDiskError("unable to detect disk topology") device = "/dev/" + device mountlist = shellutil.run_get_output("mount")[1] - existing = self.distro.osutil.get_mount_point(mountlist, device) + existing = self.osutil.get_mount_point(mountlist, device) if(existing): logger.info("Resource disk {0}1 is already mounted", device) @@ -158,10 +158,62 @@ class ResourceDiskHandler(object): if not os.path.isfile(swapfile): logger.info("Create swap file") - shellutil.run(("dd if=/dev/zero of={0} bs=1024 " - "count={1}").format(swapfile, size_kb)) + self.mkfile(swapfile, size_kb * 1024) shellutil.run("mkswap {0}".format(swapfile)) if shellutil.run("swapon {0}".format(swapfile)): raise ResourceDiskError("{0}".format(swapfile)) logger.info("Enabled {0}KB of swap at {1}".format(size_kb, swapfile)) + def mkfile(self, filename, nbytes): + """ + Create a non-sparse file of that size. Deletes and replaces existing file. + + To allow efficient execution, fallocate will be tried first. This includes + ``os.posix_fallocate`` on Python 3.3+ (unix) and the ``fallocate`` command + in the popular ``util-linux{,-ng}`` package. + + A dd fallback will be tried too. When size < 64M, perform single-pass dd. + Otherwise do two-pass dd. 
+ """ + + if not isinstance(nbytes, int): + nbytes = int(nbytes) + + if nbytes < 0: + raise ValueError(nbytes) + + if os.path.isfile(filename): + os.remove(filename) + + # os.posix_fallocate + if sys.version_info >= (3, 3): + # Probable errors: + # - OSError: Seen on Cygwin, libc notimpl? + # - AttributeError: What if someone runs this under... + with open(filename, 'w') as f: + try: + os.posix_fallocate(f.fileno(), 0, nbytes) + return 0 + except: + # Not confident with this thing, just keep trying... + pass + + # fallocate command + fn_sh = shellutil.quote((filename,)) + ret = shellutil.run(u"fallocate -l {0} {1}".format(nbytes, fn_sh)) + if ret != 127: # 127 = command not found + return ret + + # dd fallback + dd_maxbs = 64 * 1024 ** 2 + dd_cmd = "dd if=/dev/zero bs={0} count={1} conv=notrunc of={2}" + + blocks = int(nbytes / dd_maxbs) + if blocks > 0: + ret = shellutil.run(dd_cmd.format(dd_maxbs, fn_sh, blocks)) << 8 + + remains = int(nbytes % dd_maxbs) + if remains > 0: + ret += shellutil.run(dd_cmd.format(remains, fn_sh, 1)) + + return ret diff --git a/azurelinuxagent/daemon/resourcedisk/factory.py b/azurelinuxagent/daemon/resourcedisk/factory.py new file mode 100644 index 0000000..76e5a23 --- /dev/null +++ b/azurelinuxagent/daemon/resourcedisk/factory.py @@ -0,0 +1,33 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.utils.textutil import Version +from azurelinuxagent.common.version import DISTRO_NAME, \ + DISTRO_VERSION, \ + DISTRO_FULL_NAME +from .default import ResourceDiskHandler +from .freebsd import FreeBSDResourceDiskHandler + +def get_resourcedisk_handler(distro_name=DISTRO_NAME, + distro_version=DISTRO_VERSION, + distro_full_name=DISTRO_FULL_NAME): + if distro_name == "freebsd": + return FreeBSDResourceDiskHandler() + + return ResourceDiskHandler() + diff --git a/azurelinuxagent/daemon/resourcedisk/freebsd.py b/azurelinuxagent/daemon/resourcedisk/freebsd.py new file mode 100644 index 0000000..36a3ac9 --- /dev/null +++ b/azurelinuxagent/daemon/resourcedisk/freebsd.py @@ -0,0 +1,117 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.exception import ResourceDiskError +from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler + +class FreeBSDResourceDiskHandler(ResourceDiskHandler): + """ + This class handles resource disk mounting for FreeBSD. + + The resource disk locates at following slot: + scbus2 on blkvsc1 bus 0: + <Msft Virtual Disk 1.0> at scbus2 target 1 lun 0 (da1,pass2) + + There are 2 variations based on partition table type: + 1. MBR: The resource disk partition is /dev/da1s1 + 2. GPT: The resource disk partition is /dev/da1p2, /dev/da1p1 is for reserved usage. + """ + def __init__(self): + super(FreeBSDResourceDiskHandler, self).__init__() + + @staticmethod + def parse_gpart_list(data): + dic = {} + for line in data.split('\n'): + if line.find("Geom name: ") != -1: + geom_name = line[11:] + elif line.find("scheme: ") != -1: + dic[geom_name] = line[8:] + return dic + + def mount_resource_disk(self, mount_point, fs): + if fs != 'ufs': + raise ResourceDiskError("Unsupported filesystem type:{0}, only ufs is supported.".format(fs)) + + # 1. Detect device + err, output = shellutil.run_get_output('gpart list') + if err: + raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output)) + disks = self.parse_gpart_list(output) + + err, output = shellutil.run_get_output('camcontrol periphlist 2:1:0') + if err: + raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output)) + + # 'da1: generation: 4 index: 1 status: MORE\npass2: generation: 4 index: 2 status: LAST\n' + device = None + for line in output.split('\n'): + index = line.find(':') + if index > 0: + geom_name = line[:index] + if geom_name in disks: + device = geom_name + break + + if not device: + raise ResourceDiskError("Unable to detect resource disk device.") + logger.info('Resource disk device {0} found.', device) + + # 2. Detect partition + partition_table_type = disks[device] + + if partition_table_type == 'MBR': + provider_name = device + 's1' + elif partition_table_type == 'GPT': + provider_name = device + 'p2' + else: + raise ResourceDiskError("Unsupported partition table type:{0}".format(output)) + + err, output = shellutil.run_get_output('gpart show -p {0}'.format(device)) + if err or output.find(provider_name) == -1: + raise ResourceDiskError("Resource disk partition not found.") + + partition = '/dev/' + provider_name + logger.info('Resource disk partition {0} found.', partition) + + # 3. 
Mount partition + mount_list = shellutil.run_get_output("mount")[1] + existing = self.osutil.get_mount_point(mount_list, partition) + + if existing: + logger.info("Resource disk {0} is already mounted", partition) + return existing + + fileutil.mkdir(mount_point, mode=0o755) + mount_cmd = 'mount -t {0} {1} {2}'.format(fs, partition, mount_point) + err = shellutil.run(mount_cmd, chk_err=False) + if err: + logger.info('Creating {0} filesystem on partition {1}'.format(fs, partition)) + err, output = shellutil.run_get_output('newfs -U {0}'.format(partition)) + if err: + raise ResourceDiskError("Failed to create new filesystem on partition {0}, error:{1}" + .format(partition, output)) + err, output = shellutil.run_get_output(mount_cmd, chk_err=False) + if err: + raise ResourceDiskError("Failed to mount partition {0}, error {1}".format(partition, output)) + + logger.info("Resource disk partition {0} is mounted at {1} with fstype {2}", partition, mount_point, fs) + return mount_point diff --git a/azurelinuxagent/daemon/scvmm.py b/azurelinuxagent/daemon/scvmm.py new file mode 100644 index 0000000..dc6832a --- /dev/null +++ b/azurelinuxagent/daemon/scvmm.py @@ -0,0 +1,74 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
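parse_gpart_list() in the FreeBSD handler above builds a geom-name to partition-scheme map from `gpart list` output, which is what lets mount_resource_disk() choose /dev/da1s1 (MBR) versus /dev/da1p2 (GPT). Here is the function restated outside the class against an abbreviated, made-up sample:

```python
# Same parsing as the diff: "Geom name: " is 11 characters, "scheme: " is 8.
def parse_gpart_list(data):
    dic = {}
    for line in data.split('\n'):
        if line.find("Geom name: ") != -1:
            geom_name = line[11:]
        elif line.find("scheme: ") != -1:
            dic[geom_name] = line[8:]
    return dic

SAMPLE = """Geom name: da1
scheme: GPT
Providers:
1. Name: da1p1
Geom name: da0
scheme: MBR
"""

print(parse_gpart_list(SAMPLE))   # {'da1': 'GPT', 'da0': 'MBR'}
```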
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import re +import os +import sys +import subprocess +import time +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.osutil import get_osutil + +VMM_CONF_FILE_NAME = "linuxosconfiguration.xml" +VMM_STARTUP_SCRIPT_NAME= "install" + +def get_scvmm_handler(): + return ScvmmHandler() + +class ScvmmHandler(object): + def __init__(self): + self.osutil = get_osutil() + + def detect_scvmm_env(self, dev_dir='/dev'): + logger.info("Detecting Microsoft System Center VMM Environment") + found=False + + # try to load the ATAPI driver, continue on failure + self.osutil.try_load_atapiix_mod() + + # cycle through all available /dev/sr*|hd*|cdrom*|cd* looking for the scvmm configuration file + mount_point = conf.get_dvd_mount_point() + for devices in filter(lambda x: x is not None, [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]?|cd[0-9]+)', dev) for dev in os.listdir(dev_dir)]): + dvd_device = os.path.join(dev_dir, devices.group(0)) + self.osutil.mount_dvd(max_retry=1, chk_err=False, dvd_device=dvd_device, mount_point=mount_point) + found = os.path.isfile(os.path.join(mount_point, VMM_CONF_FILE_NAME)) + if found: + self.start_scvmm_agent(mount_point=mount_point) + break + else: + self.osutil.umount_dvd(chk_err=False, mount_point=mount_point) + + return found + + def start_scvmm_agent(self, mount_point=None): + logger.info("Starting Microsoft System Center VMM Initialization " + "Process") + if mount_point is None: + mount_point = conf.get_dvd_mount_point() + startup_script = os.path.join(mount_point, VMM_STARTUP_SCRIPT_NAME) + devnull = open(os.devnull, 'w') + subprocess.Popen(["/bin/bash", startup_script, "-p " + mount_point], + stdout=devnull, stderr=devnull) + + def run(self): + if self.detect_scvmm_env(): + logger.info("Exiting") + time.sleep(300) + sys.exit(0) diff --git a/azurelinuxagent/distro/__init__.py b/azurelinuxagent/distro/__init__.py index d9b82f5..1ea2f38 100644 --- a/azurelinuxagent/distro/__init__.py +++ b/azurelinuxagent/distro/__init__.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/azurelinuxagent/distro/coreos/distro.py b/azurelinuxagent/distro/coreos/distro.py deleted file mode 100644 index 04c7bff..0000000 --- a/azurelinuxagent/distro/coreos/distro.py +++ /dev/null @@ -1,29 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
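detect_scvmm_env() above scans /dev for optical-drive style device names before probing each one for the SCVMM configuration file. The filter is just the regex shown in the diff; a quick illustration against a made-up directory listing:

```python
import re

# Same pattern as in detect_scvmm_env(): sr*, hd[c-z], cdrom*, or cd<N>.
DEV_RE = re.compile(r'(sr[0-9]|hd[c-z]|cdrom[0-9]?|cd[0-9]+)')

listing = ["sda", "sda1", "sr0", "hdc", "cdrom", "cd0", "tty0"]
candidates = [d for d in listing if DEV_RE.match(d)]
print(candidates)   # ['sr0', 'hdc', 'cdrom', 'cd0']
```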
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from azurelinuxagent.distro.default.distro import DefaultDistro -from azurelinuxagent.distro.coreos.osutil import CoreOSUtil -from azurelinuxagent.distro.coreos.deprovision import CoreOSDeprovisionHandler - -class CoreOSDistro(DefaultDistro): - def __init__(self): - super(CoreOSDistro, self).__init__() - self.osutil = CoreOSUtil() - self.deprovision_handler = CoreOSDeprovisionHandler(self) - diff --git a/azurelinuxagent/distro/debian/distro.py b/azurelinuxagent/distro/debian/distro.py deleted file mode 100644 index 01f4e3e..0000000 --- a/azurelinuxagent/distro/debian/distro.py +++ /dev/null @@ -1,27 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from azurelinuxagent.distro.default.distro import DefaultDistro -from azurelinuxagent.distro.debian.osutil import DebianOSUtil - -class DebianDistro(DefaultDistro): - def __init__(self): - super(DebianDistro, self).__init__() - self.osutil = DebianOSUtil() - diff --git a/azurelinuxagent/distro/default/daemon.py b/azurelinuxagent/distro/default/daemon.py deleted file mode 100644 index cf9eb16..0000000 --- a/azurelinuxagent/distro/default/daemon.py +++ /dev/null @@ -1,103 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import time -import sys -import traceback -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -from azurelinuxagent.future import ustr -from azurelinuxagent.event import add_event, WALAEventOperation -from azurelinuxagent.exception import ProtocolError -from azurelinuxagent.metadata import AGENT_LONG_NAME, AGENT_VERSION, \ - DISTRO_NAME, DISTRO_VERSION, \ - DISTRO_FULL_NAME, PY_VERSION_MAJOR, \ - PY_VERSION_MINOR, PY_VERSION_MICRO -import azurelinuxagent.event as event -import azurelinuxagent.utils.fileutil as fileutil - - -class DaemonHandler(object): - def __init__(self, distro): - self.distro = distro - self.running = True - - - def run(self): - logger.info("{0} Version:{1}", AGENT_LONG_NAME, AGENT_VERSION) - logger.info("OS: {0} {1}", DISTRO_NAME, DISTRO_VERSION) - logger.info("Python: {0}.{1}.{2}", PY_VERSION_MAJOR, PY_VERSION_MINOR, - PY_VERSION_MICRO) - - self.check_pid() - - while self.running: - try: - self.daemon() - except Exception as e: - err_msg = traceback.format_exc() - add_event("WALA", is_success=False, message=ustr(err_msg), - op=WALAEventOperation.UnhandledError) - logger.info("Sleep 15 seconds and restart daemon") - time.sleep(15) - - def check_pid(self): - """Check whether daemon is already running""" - pid = None - pid_file = conf.get_agent_pid_file_path() - if os.path.isfile(pid_file): - pid = fileutil.read_file(pid_file) - - if pid is not None and os.path.isdir(os.path.join("/proc", pid)): - logger.info("Daemon is already running: {0}", pid) - sys.exit(0) - - fileutil.write_file(pid_file, ustr(os.getpid())) - - def daemon(self): - logger.info("Run daemon") - #Create lib dir - if not os.path.isdir(conf.get_lib_dir()): - fileutil.mkdir(conf.get_lib_dir(), mode=0o700) - os.chdir(conf.get_lib_dir()) - - if conf.get_detect_scvmm_env(): - if self.distro.scvmm_handler.run(): - return - - self.distro.provision_handler.run() - - if conf.get_resourcedisk_format(): - self.distro.resource_disk_handler.run() - - try: - protocol = self.distro.protocol_util.detect_protocol() - except ProtocolError as e: - logger.error("Failed to detect protocol, exit", e) - return - - self.distro.event_handler.run() - self.distro.env_handler.run() - - while self.running: - #Handle extensions - self.distro.ext_handlers_handler.run() - time.sleep(25) - diff --git a/azurelinuxagent/distro/default/distro.py b/azurelinuxagent/distro/default/distro.py deleted file mode 100644 index ca0d77e..0000000 --- a/azurelinuxagent/distro/default/distro.py +++ /dev/null @@ -1,51 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from azurelinuxagent.conf import ConfigurationProvider -from azurelinuxagent.distro.default.osutil import DefaultOSUtil -from azurelinuxagent.distro.default.daemon import DaemonHandler -from azurelinuxagent.distro.default.init import InitHandler -from azurelinuxagent.distro.default.monitor import MonitorHandler -from azurelinuxagent.distro.default.dhcp import DhcpHandler -from azurelinuxagent.distro.default.protocolUtil import ProtocolUtil -from azurelinuxagent.distro.default.scvmm import ScvmmHandler -from azurelinuxagent.distro.default.env import EnvHandler -from azurelinuxagent.distro.default.provision import ProvisionHandler -from azurelinuxagent.distro.default.resourceDisk import ResourceDiskHandler -from azurelinuxagent.distro.default.extension import ExtHandlersHandler -from azurelinuxagent.distro.default.deprovision import DeprovisionHandler - -class DefaultDistro(object): - """ - """ - def __init__(self): - self.osutil = DefaultOSUtil() - self.protocol_util = ProtocolUtil(self) - - self.init_handler = InitHandler(self) - self.daemon_handler = DaemonHandler(self) - self.event_handler = MonitorHandler(self) - self.dhcp_handler = DhcpHandler(self) - self.scvmm_handler = ScvmmHandler(self) - self.env_handler = EnvHandler(self) - self.provision_handler = ProvisionHandler(self) - self.resource_disk_handler = ResourceDiskHandler(self) - self.ext_handlers_handler = ExtHandlersHandler(self) - self.deprovision_handler = DeprovisionHandler(self) - diff --git a/azurelinuxagent/distro/default/init.py b/azurelinuxagent/distro/default/init.py deleted file mode 100644 index c703e87..0000000 --- a/azurelinuxagent/distro/default/init.py +++ /dev/null @@ -1,53 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -import azurelinuxagent.event as event - - -class InitHandler(object): - def __init__(self, distro): - self.distro = distro - - def run(self, verbose): - #Init stdout log - level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO - logger.add_logger_appender(logger.AppenderType.STDOUT, level) - - #Init config - conf_file_path = self.distro.osutil.get_agent_conf_file_path() - conf.load_conf_from_file(conf_file_path) - - #Init log - verbose = verbose or conf.get_logs_verbose() - level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO - logger.add_logger_appender(logger.AppenderType.FILE, level, - path="/var/log/waagent.log") - logger.add_logger_appender(logger.AppenderType.CONSOLE, level, - path="/dev/console") - - #Init event reporter - event_dir = os.path.join(conf.get_lib_dir(), "events") - event.init_event_logger(event_dir) - event.enable_unhandled_err_dump("WALA") - - - diff --git a/azurelinuxagent/distro/default/scvmm.py b/azurelinuxagent/distro/default/scvmm.py deleted file mode 100644 index 4d083b4..0000000 --- a/azurelinuxagent/distro/default/scvmm.py +++ /dev/null @@ -1,48 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import subprocess -import azurelinuxagent.logger as logger - -VMM_CONF_FILE_NAME = "linuxosconfiguration.xml" -VMM_STARTUP_SCRIPT_NAME= "install" - -class ScvmmHandler(object): - def __init__(self, distro): - self.distro = distro - - def detect_scvmm_env(self): - logger.info("Detecting Microsoft System Center VMM Environment") - self.distro.osutil.mount_dvd(max_retry=1, chk_err=False) - mount_point = self.distro.osutil.get_dvd_mount_point() - found = os.path.isfile(os.path.join(mount_point, VMM_CONF_FILE_NAME)) - if found: - self.start_scvmm_agent() - else: - self.distro.osutil.umount_dvd(chk_err=False) - return found - - def start_scvmm_agent(self): - logger.info("Starting Microsoft System Center VMM Initialization " - "Process") - mount_point = self.distro.osutil.get_dvd_mount_point() - startup_script = os.path.join(mount_point, VMM_STARTUP_SCRIPT_NAME) - subprocess.Popen(["/bin/bash", startup_script, "-p " + mount_point]) - diff --git a/azurelinuxagent/distro/redhat/distro.py b/azurelinuxagent/distro/redhat/distro.py deleted file mode 100644 index 2f128d7..0000000 --- a/azurelinuxagent/distro/redhat/distro.py +++ /dev/null @@ -1,32 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from azurelinuxagent.distro.default.distro import DefaultDistro -from azurelinuxagent.distro.redhat.osutil import RedhatOSUtil, Redhat6xOSUtil -from azurelinuxagent.distro.coreos.deprovision import CoreOSDeprovisionHandler - -class Redhat6xDistro(DefaultDistro): - def __init__(self): - super(Redhat6xDistro, self).__init__() - self.osutil = Redhat6xOSUtil() - -class RedhatDistro(DefaultDistro): - def __init__(self): - super(RedhatDistro, self).__init__() - self.osutil = RedhatOSUtil() diff --git a/azurelinuxagent/distro/suse/__init__.py b/azurelinuxagent/distro/suse/__init__.py index d9b82f5..1ea2f38 100644 --- a/azurelinuxagent/distro/suse/__init__.py +++ b/azurelinuxagent/distro/suse/__init__.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/azurelinuxagent/distro/ubuntu/distro.py b/azurelinuxagent/distro/ubuntu/distro.py deleted file mode 100644 index f380f6c..0000000 --- a/azurelinuxagent/distro/ubuntu/distro.py +++ /dev/null @@ -1,55 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from azurelinuxagent.distro.default.distro import DefaultDistro -from azurelinuxagent.distro.ubuntu.osutil import Ubuntu14OSUtil, \ - Ubuntu12OSUtil, \ - UbuntuOSUtil, \ - UbuntuSnappyOSUtil - -from azurelinuxagent.distro.ubuntu.provision import UbuntuProvisionHandler -from azurelinuxagent.distro.ubuntu.deprovision import UbuntuDeprovisionHandler - -class UbuntuDistro(DefaultDistro): - def __init__(self): - super(UbuntuDistro, self).__init__() - self.osutil = UbuntuOSUtil() - self.provision_handler = UbuntuProvisionHandler(self) - self.deprovision_handler = UbuntuDeprovisionHandler(self) - -class Ubuntu12Distro(DefaultDistro): - def __init__(self): - super(Ubuntu12Distro, self).__init__() - self.osutil = Ubuntu12OSUtil() - self.provision_handler = UbuntuProvisionHandler(self) - self.deprovision_handler = UbuntuDeprovisionHandler(self) - -class Ubuntu14Distro(DefaultDistro): - def __init__(self): - super(Ubuntu14Distro, self).__init__() - self.osutil = Ubuntu14OSUtil() - self.provision_handler = UbuntuProvisionHandler(self) - self.deprovision_handler = UbuntuDeprovisionHandler(self) - -class UbuntuSnappyDistro(DefaultDistro): - def __init__(self): - super(UbuntuSnappyDistro, self).__init__() - self.osutil = UbuntuSnappyOSUtil() - self.provision_handler = UbuntuProvisionHandler(self) - self.deprovision_handler = UbuntuDeprovisionHandler(self) diff --git a/azurelinuxagent/distro/default/__init__.py b/azurelinuxagent/ga/__init__.py index d9b82f5..1ea2f38 100644 --- a/azurelinuxagent/distro/default/__init__.py +++ b/azurelinuxagent/ga/__init__.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/azurelinuxagent/distro/default/env.py b/azurelinuxagent/ga/env.py index 7878cff..2d67d4b 100644 --- a/azurelinuxagent/distro/default/env.py +++ b/azurelinuxagent/ga/env.py @@ -19,10 +19,17 @@ import os import socket -import threading import time -import azurelinuxagent.logger as logger -import azurelinuxagent.conf as conf +import threading + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger + +from azurelinuxagent.common.dhcp import get_dhcp_handler +from azurelinuxagent.common.osutil import get_osutil + +def get_env_handler(): + return EnvHandler() class EnvHandler(object): """ @@ -32,8 +39,9 @@ class EnvHandler(object): Monitor scsi disk. If new scsi disk found, set timeout """ - def __init__(self, distro): - self.distro = distro + def __init__(self): + self.osutil = get_osutil() + self.dhcp_handler = get_dhcp_handler() self.stopped = True self.hostname = None self.dhcpid = None @@ -46,9 +54,9 @@ class EnvHandler(object): self.stopped = False logger.info("Start env monitor service.") - self.distro.dhcp_handler.conf_routes() + self.dhcp_handler.conf_routes() self.hostname = socket.gethostname() - self.dhcpid = self.distro.osutil.get_dhcp_pid() + self.dhcpid = self.osutil.get_dhcp_pid() self.server_thread = threading.Thread(target = self.monitor) self.server_thread.setDaemon(True) self.server_thread.start() @@ -59,10 +67,10 @@ class EnvHandler(object): If dhcp clinet process re-start has occurred, reset routes. 
""" while not self.stopped: - self.distro.osutil.remove_rules_files() + self.osutil.remove_rules_files() timeout = conf.get_root_device_scsi_timeout() if timeout is not None: - self.distro.osutil.set_scsi_disks_timeout(timeout) + self.osutil.set_scsi_disks_timeout(timeout) if conf.get_monitor_hostname(): self.handle_hostname_update() self.handle_dhclient_restart() @@ -73,25 +81,25 @@ class EnvHandler(object): if curr_hostname != self.hostname: logger.info("EnvMonitor: Detected host name change: {0} -> {1}", self.hostname, curr_hostname) - self.distro.osutil.set_hostname(curr_hostname) - self.distro.osutil.publish_hostname(curr_hostname) + self.osutil.set_hostname(curr_hostname) + self.osutil.publish_hostname(curr_hostname) self.hostname = curr_hostname def handle_dhclient_restart(self): if self.dhcpid is None: logger.warn("Dhcp client is not running. ") - self.dhcpid = self.distro.osutil.get_dhcp_pid() + self.dhcpid = self.osutil.get_dhcp_pid() return #The dhcp process hasn't changed since last check - if os.path.isdir(os.path.join('/proc', self.dhcpid.strip())): + if self.osutil.check_pid_alive(self.dhcpid.strip()): return - newpid = self.distro.osutil.get_dhcp_pid() + newpid = self.osutil.get_dhcp_pid() if newpid is not None and newpid != self.dhcpid: logger.info("EnvMonitor: Detected dhcp client restart. " "Restoring routing table.") - self.distro.dhcp_handler.conf_routes() + self.dhcp_handler.conf_routes() self.dhcpid = newpid def stop(self): diff --git a/azurelinuxagent/distro/default/extension.py b/azurelinuxagent/ga/exthandlers.py index 82cdfed..d3c8f32 100644 --- a/azurelinuxagent/distro/default/extension.py +++ b/azurelinuxagent/ga/exthandlers.py @@ -16,26 +16,36 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -import os -import zipfile -import time + +import glob import json -import subprocess +import os import shutil -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -from azurelinuxagent.event import add_event, WALAEventOperation -from azurelinuxagent.exception import ExtensionError, ProtocolError, HttpError -from azurelinuxagent.future import ustr -from azurelinuxagent.metadata import AGENT_VERSION -from azurelinuxagent.protocol.restapi import ExtHandlerStatus, ExtensionStatus, \ - ExtensionSubStatus, Extension, \ - VMStatus, ExtHandler, \ - get_properties, set_properties -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.restutil as restutil -import azurelinuxagent.utils.shellutil as shellutil -from azurelinuxagent.utils.textutil import Version +import subprocess +import time +import zipfile + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.restutil as restutil +import azurelinuxagent.common.utils.shellutil as shellutil + +from azurelinuxagent.common.event import add_event, WALAEventOperation +from azurelinuxagent.common.exception import ExtensionError, ProtocolError, HttpError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.version import AGENT_VERSION +from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \ + ExtensionStatus, \ + ExtensionSubStatus, \ + Extension, \ + VMStatus, ExtHandler, \ + get_properties, \ + set_properties +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion +from azurelinuxagent.common.utils.textutil import Version +from azurelinuxagent.common.protocol import get_protocol_util +from 
azurelinuxagent.common.version import AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION #HandlerEnvironment.json schema version HANDLER_ENVIRONMENT_VERSION = 1.0 @@ -103,40 +113,52 @@ class ExtHandlerState(object): Installed = "Installed" Enabled = "Enabled" +def get_exthandlers_handler(): + return ExtHandlersHandler() + class ExtHandlersHandler(object): - def __init__(self, distro): - self.distro = distro + def __init__(self): + self.protocol_util = get_protocol_util() self.ext_handlers = None self.last_etag = None self.log_report = False def run(self): - ext_handlers, etag = None, None + self.ext_handlers, etag = None, None try: - self.protocol = self.distro.protocol_util.get_protocol() - ext_handlers, etag = self.protocol.get_ext_handlers() + self.protocol = self.protocol_util.get_protocol() + self.ext_handlers, etag = self.protocol.get_ext_handlers() except ProtocolError as e: - add_event(name="WALA", is_success=False, message=ustr(e)) + msg = u"Exception retrieving extension handlers: {0}".format( + ustr(e)) + logger.warn(msg) + add_event(AGENT_NAME, version=CURRENT_VERSION, is_success=False, message=msg) return if self.last_etag is not None and self.last_etag == etag: - logger.verb("No change to ext handler config:{0}, skip", etag) + msg = u"Incarnation {0} has no extension updates".format(etag) + logger.verbose(msg) self.log_report = False else: - logger.info("Handle new ext handler config") + msg = u"Handle extensions updates for incarnation {0}".format(etag) + logger.info(msg) self.log_report = True #Log status report success on new config - self.handle_ext_handlers(ext_handlers) + self.handle_ext_handlers() self.last_etag = etag - self.report_ext_handlers_status(ext_handlers) + self.report_ext_handlers_status() + + def run_status(self): + self.report_ext_handlers_status() + return - def handle_ext_handlers(self, ext_handlers): - if ext_handlers.extHandlers is None or \ - len(ext_handlers.extHandlers) == 0: + def handle_ext_handlers(self): + if self.ext_handlers.extHandlers is None or \ + len(self.ext_handlers.extHandlers) == 0: logger.info("No ext handler config found") return - for ext_handler in ext_handlers.extHandlers: + for ext_handler in self.ext_handlers.extHandlers: #TODO handle install in sequence, enable in parallel self.handle_ext_handler(ext_handler) @@ -205,30 +227,34 @@ class ExtHandlersHandler(object): ext_handler_i.uninstall() ext_handler_i.rm_ext_handler_dir() - def report_ext_handlers_status(self, ext_handlers): + def report_ext_handlers_status(self): """Go thru handler_state dir, collect and report status""" vm_status = VMStatus() - vm_status.vmAgent.version = AGENT_VERSION + vm_status.vmAgent.version = str(CURRENT_VERSION) vm_status.vmAgent.status = "Ready" vm_status.vmAgent.message = "Guest Agent is running" - if ext_handlers is not None: - for ext_handler in ext_handlers.extHandlers: + if self.ext_handlers is not None: + for ext_handler in self.ext_handlers.extHandlers: try: self.report_ext_handler_status(vm_status, ext_handler) except ExtensionError as e: - add_event(name="WALA", is_success=False, message=ustr(e)) + add_event( + AGENT_NAME, + version=CURRENT_VERSION, + is_success=False, + message=ustr(e)) - logger.verb("Report vm agent status") + logger.verbose("Report vm agent status") try: self.protocol.report_vm_status(vm_status) except ProtocolError as e: message = "Failed to report vm agent status: {0}".format(e) - add_event(name="WALA", is_success=False, message=message) + add_event(AGENT_NAME, version=CURRENT_VERSION, is_success=False, message=message) if 
self.log_report: - logger.info("Successfully reported vm agent status") + logger.verbose("Successfully reported vm agent status") def report_ext_handler_status(self, vm_status, ext_handler): @@ -275,43 +301,102 @@ class ExtHandlerInstance(object): logger.LogLevel.INFO, log_file) def decide_version(self): - """ - If auto-upgrade, get the largest public extension version under - the requested major version family of currently installed plugin version - - Else, get the highest hot-fix for requested version, - """ self.logger.info("Decide which version to use") try: pkg_list = self.protocol.get_ext_handler_pkgs(self.ext_handler) except ProtocolError as e: raise ExtensionError("Failed to get ext handler pkgs", e) - version = self.ext_handler.properties.version - update_policy = self.ext_handler.properties.upgradePolicy - - version_frag = version.split('.') - if len(version_frag) < 2: - raise ExtensionError("Wrong version format: {0}".format(version)) - - version_prefix = None - if update_policy is not None and update_policy == 'auto': - version_prefix = "{0}.".format(version_frag[0]) + # Determine the desired and installed versions + requested_version = FlexibleVersion(self.ext_handler.properties.version) + installed_version = FlexibleVersion(self.get_installed_version()) + if installed_version is None: + installed_version = requested_version + + # Divide packages + # - Find the installed package (its version must exactly match) + # - Find the internal candidate (its version must exactly match) + # - Separate the public packages + internal_pkg = None + installed_pkg = None + public_pkgs = [] + for pkg in pkg_list.versions: + pkg_version = FlexibleVersion(pkg.version) + if pkg_version == installed_version: + installed_pkg = pkg + if pkg.isinternal and pkg_version == requested_version: + internal_pkg = pkg + if not pkg.isinternal: + public_pkgs.append(pkg) + + internal_version = FlexibleVersion(internal_pkg.version) \ + if internal_pkg is not None \ + else FlexibleVersion() + public_pkgs.sort(key=lambda pkg: FlexibleVersion(pkg.version), reverse=True) + + # Determine the preferred version and type of upgrade occurring + preferred_version = max(requested_version, installed_version) + is_major_upgrade = preferred_version.major > installed_version.major + allow_minor_upgrade = self.ext_handler.properties.upgradePolicy == 'auto' + + # Find the first public candidate which + # - Matches the preferred major version + # - Does not upgrade to a new, disallowed major version + # - And only increments the minor version if allowed + # Notes: + # - The patch / hotfix version is not considered + public_pkg = None + for pkg in public_pkgs: + pkg_version = FlexibleVersion(pkg.version) + if pkg_version.major == preferred_version.major \ + and (not pkg.disallow_major_upgrade or not is_major_upgrade) \ + and (allow_minor_upgrade or pkg_version.minor == preferred_version.minor): + public_pkg = pkg + break + + # If there are no candidates, locate the highest public version whose + # major matches that installed + if internal_pkg is None and public_pkg is None: + for pkg in public_pkgs: + pkg_version = FlexibleVersion(pkg.version) + if pkg_version.major == installed_version.major: + public_pkg = pkg + break + + public_version = FlexibleVersion(public_pkg.version) \ + if public_pkg is not None \ + else FlexibleVersion() + + # Select the candidate + # - Use the public candidate if there is no internal candidate or + # the public is more recent (e.g., a hotfix patch) + # - Otherwise use the internal candidate + if 
internal_pkg is None or (public_pkg is not None and public_version > internal_version): + selected_pkg = public_pkg else: - version_prefix = "{0}.{1}.".format(version_frag[0], version_frag[1]) - - packages = [x for x in pkg_list.versions \ - if x.version.startswith(version_prefix) or \ - x.version == version] - - packages = sorted(packages, key=lambda x: Version(x.version), - reverse=True) + selected_pkg = internal_pkg + + selected_version = FlexibleVersion(selected_pkg.version) \ + if selected_pkg is not None \ + else FlexibleVersion() + + # Finally, update the version only if not downgrading + # Note: + # - A downgrade, which will be bound to the same major version, + # is allowed if the installed version is no longer available + if selected_pkg is None \ + or (installed_pkg is not None and selected_version < installed_version): + self.pkg = installed_pkg + self.ext_handler.properties.version = installed_version + else: + self.pkg = selected_pkg + self.ext_handler.properties.version = selected_pkg.version + + if self.pkg is None: + raise ExtensionError("Failed to find any valid extension package") - if len(packages) <= 0: - raise ExtensionError("Failed to find and valid extension package") - self.pkg = packages[0] - self.ext_handler.properties.version = packages[0].version self.logger.info("Use version: {0}", self.pkg.version) + return def version_gt(self, other): self_version = self.ext_handler.properties.version @@ -319,31 +404,29 @@ class ExtHandlerInstance(object): return Version(self_version) > Version(other_version) def get_installed_ext_handler(self): - lastest_version = None - ext_handler_name = self.ext_handler.name - - for dir_name in os.listdir(conf.get_lib_dir()): - path = os.path.join(conf.get_lib_dir(), dir_name) - if os.path.isdir(path) and dir_name.startswith(ext_handler_name): - seperator = dir_name.rfind('-') - if seperator < 0: - continue - installed_name = dir_name[0: seperator] - installed_version = dir_name[seperator + 1:] - if installed_name != ext_handler_name: - continue - if lastest_version is None or \ - Version(lastest_version) < Version(installed_version): - lastest_version = installed_version - + lastest_version = self.get_installed_version() if lastest_version is None: return None - data = get_properties(self.ext_handler) - old_ext_handler = ExtHandler() - set_properties("ExtHandler", old_ext_handler, data) - old_ext_handler.properties.version = lastest_version - return ExtHandlerInstance(old_ext_handler, self.protocol) + installed_handler = ExtHandler() + set_properties("ExtHandler", installed_handler, get_properties(self.ext_handler)) + installed_handler.properties.version = lastest_version + return ExtHandlerInstance(installed_handler, self.protocol) + + def get_installed_version(self): + lastest_version = None + + for path in glob.iglob(os.path.join(conf.get_lib_dir(), self.ext_handler.name + "-*")): + if not os.path.isdir(path): + continue + + separator = path.rfind('-') + version = FlexibleVersion(path[separator+1:]) + + if lastest_version is None or lastest_version < version: + lastest_version = version + + return str(lastest_version) if lastest_version is not None else None def copy_status_files(self, old_ext_handler_i): self.logger.info("Copy status files from old plugin to new") @@ -431,7 +514,7 @@ class ExtHandlerInstance(object): self.set_operation(WALAEventOperation.Enable) man = self.load_manifest() - self.launch_command(man.get_enable_command()) + self.launch_command(man.get_enable_command(), timeout=300) 
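The rewritten decide_version above replaces the old version-prefix matching with FlexibleVersion comparisons: prefer the highest public package that shares the preferred major version, allow the minor version to move forward only when upgradePolicy is 'auto', and never downgrade below the version already installed. A minimal standalone sketch of that selection policy (hypothetical helper names; plain integer tuples instead of FlexibleVersion; internal packages and the disallow_major_upgrade flag are ignored here) might look like:

```
def parse(v):
    """'1.4.0' -> (1, 4, 0); pad missing fields with zeros."""
    parts = [int(x) for x in v.split('.')]
    return tuple(parts + [0] * (3 - len(parts)))

def pick_version(requested, installed, public_versions, allow_minor_upgrade):
    requested, installed = parse(requested), parse(installed)
    preferred = max(requested, installed)

    # Highest public version sharing the preferred major version; the minor
    # version may only move forward when the upgrade policy allows it.
    selected = None
    for v in sorted((parse(p) for p in public_versions), reverse=True):
        if v[0] == preferred[0] and (allow_minor_upgrade or v[1] == preferred[1]):
            selected = v
            break

    # Never downgrade below the version already on disk.
    if selected is None or selected < installed:
        selected = installed
    return '.'.join(str(n) for n in selected)

print(pick_version("1.4.0", "1.4.2", ["1.4.5", "1.5.0", "2.0.0"], False))  # -> 1.4.5
print(pick_version("1.4.0", "1.4.2", ["1.4.5", "1.5.0", "2.0.0"], True))   # -> 1.5.0
```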
self.set_handler_state(ExtHandlerState.Enabled) self.set_handler_status(status="Ready", message="Plugin enabled") @@ -505,12 +588,12 @@ class ExtHandlerInstance(object): if curr_seq_no > seq_no: seq_no = curr_seq_no except Exception as e: - self.logger.verb("Failed to parse file name: {0}", item) + self.logger.verbose("Failed to parse file name: {0}", item) continue return seq_no def collect_ext_status(self, ext): - self.logger.verb("Collect extension status") + self.logger.verbose("Collect extension status") seq_no = self.get_largest_seq_no() if seq_no == -1: @@ -584,15 +667,17 @@ class ExtHandlerInstance(object): base_dir = self.get_base_dir() try: devnull = open(os.devnull, 'w') - child = subprocess.Popen(base_dir + "/" + cmd, shell=True, - cwd=base_dir, stdout=devnull) + child = subprocess.Popen(base_dir + "/" + cmd, + shell=True, + cwd=base_dir, + stdout=devnull) except Exception as e: #TODO do not catch all exception raise ExtensionError("Failed to launch: {0}, {1}".format(cmd, e)) - retry = timeout / 5 - while retry > 0 and child.poll == None: - time.sleep(5) + retry = timeout + while retry > 0 and child.poll() is None: + time.sleep(1) retry -= 1 if retry == 0: os.kill(child.pid, 9) diff --git a/azurelinuxagent/distro/default/monitor.py b/azurelinuxagent/ga/monitor.py index 3b26c9a..f49cef8 100644 --- a/azurelinuxagent/distro/default/monitor.py +++ b/azurelinuxagent/ga/monitor.py @@ -15,27 +15,29 @@ # Requires Python 2.4+ and Openssl 1.0+ # -import os -import sys -import traceback -import atexit +import datetime import json +import os +import platform import time -import datetime import threading -import platform -import azurelinuxagent.logger as logger -import azurelinuxagent.conf as conf -from azurelinuxagent.event import WALAEventOperation, add_event -from azurelinuxagent.exception import EventError, ProtocolError, OSUtilError -from azurelinuxagent.future import ustr -from azurelinuxagent.utils.textutil import parse_doc, findall, find, getattrib -from azurelinuxagent.protocol.restapi import TelemetryEventParam, \ - TelemetryEventList, \ - TelemetryEvent, \ - set_properties, get_properties -from azurelinuxagent.metadata import DISTRO_NAME, DISTRO_VERSION, \ - DISTRO_CODE_NAME, AGENT_LONG_VERSION + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger + +from azurelinuxagent.common.event import WALAEventOperation, add_event +from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \ + TelemetryEventList, \ + TelemetryEvent, \ + set_properties +from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib +from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ + DISTRO_CODE_NAME, AGENT_LONG_VERSION, \ + CURRENT_AGENT, CURRENT_VERSION def parse_event(data_str): @@ -44,6 +46,7 @@ def parse_event(data_str): except ValueError: return parse_xml_event(data_str) + def parse_xml_param(param_node): name = getattrib(param_node, "Name") value_str = getattrib(param_node, "Value") @@ -55,14 +58,15 @@ def parse_xml_param(param_node): value = bool(value_str) elif attr_type == 'mt:float64': value = float(value_str) - return TelemetryEventParam(name, value) + return TelemetryEventParam(name, value) + def parse_xml_event(data_str): try: xml_doc = 
parse_doc(data_str) event_id = getattrib(find(xml_doc, "Event"), 'id') provider_id = getattrib(find(xml_doc, "Provider"), 'id') - event = TelemetryEvent(event_id, provider_id) + event = TelemetryEvent(event_id, provider_id) param_nodes = findall(xml_doc, 'Param') for param_node in param_nodes: event.parameters.append(parse_xml_param(param_node)) @@ -70,6 +74,7 @@ def parse_xml_event(data_str): except Exception as e: raise ValueError(ustr(e)) + def parse_json_event(data_str): data = json.loads(data_str) event = TelemetryEvent() @@ -77,13 +82,20 @@ def parse_json_event(data_str): return event +def get_monitor_handler(): + return MonitorHandler() + + class MonitorHandler(object): - def __init__(self, distro): - self.distro = distro + def __init__(self): + self.osutil = get_osutil() + self.protocol_util = get_protocol_util() self.sysinfo = [] - + def run(self): - event_thread = threading.Thread(target = self.daemon) + self.init_sysinfo() + + event_thread = threading.Thread(target=self.daemon) event_thread.setDaemon(True) event_thread.start() @@ -93,42 +105,41 @@ class MonitorHandler(object): DISTRO_VERSION, DISTRO_CODE_NAME, platform.release()) - - self.sysinfo.append(TelemetryEventParam("OSVersion", osversion)) - self.sysinfo.append(TelemetryEventParam("GAVersion", AGENT_LONG_VERSION)) - + self.sysinfo.append( + TelemetryEventParam("GAVersion", CURRENT_AGENT)) + try: - ram = self.distro.osutil.get_total_mem() - processors = self.distro.osutil.get_processor_cores() + ram = self.osutil.get_total_mem() + processors = self.osutil.get_processor_cores() self.sysinfo.append(TelemetryEventParam("RAM", ram)) self.sysinfo.append(TelemetryEventParam("Processors", processors)) except OSUtilError as e: logger.warn("Failed to get system info: {0}", e) try: - protocol = self.distro.protocol_util.get_protocol() + protocol = self.protocol_util.get_protocol() vminfo = protocol.get_vminfo() - self.sysinfo.append(TelemetryEventParam("VMName", - vminfo.vmName)) - self.sysinfo.append(TelemetryEventParam("TenantName", - vminfo.tenantName)) - self.sysinfo.append(TelemetryEventParam("RoleName", - vminfo.roleName)) - self.sysinfo.append(TelemetryEventParam("RoleInstanceName", - vminfo.roleInstanceName)) - self.sysinfo.append(TelemetryEventParam("ContainerId", - vminfo.containerId)) + self.sysinfo.append(TelemetryEventParam("VMName", + vminfo.vmName)) + self.sysinfo.append(TelemetryEventParam("TenantName", + vminfo.tenantName)) + self.sysinfo.append(TelemetryEventParam("RoleName", + vminfo.roleName)) + self.sysinfo.append(TelemetryEventParam("RoleInstanceName", + vminfo.roleInstanceName)) + self.sysinfo.append(TelemetryEventParam("ContainerId", + vminfo.containerId)) except ProtocolError as e: logger.warn("Failed to get system info: {0}", e) def collect_event(self, evt_file_name): try: - logger.verb("Found event file: {0}", evt_file_name) + logger.verbose("Found event file: {0}", evt_file_name) with open(evt_file_name, "rb") as evt_file: - #if fail to open or delete the file, throw exception - data_str = evt_file.read().decode("utf-8",'ignore') - logger.verb("Processed event file: {0}", evt_file_name) + # if fail to open or delete the file, throw exception + data_str = evt_file.read().decode("utf-8", 'ignore') + logger.verbose("Processed event file: {0}", evt_file_name) os.remove(evt_file_name) return data_str except IOError as e: @@ -159,19 +170,18 @@ class MonitorHandler(object): if len(event_list.events) == 0: return - + try: - protocol = self.distro.protocol_util.get_protocol() + protocol = 
self.protocol_util.get_protocol() protocol.report_event(event_list) except ProtocolError as e: logger.error("{0}", e) - + def daemon(self): - self.init_sysinfo() last_heartbeat = datetime.datetime.min - period = datetime.timedelta(hours = 12) - while(True): - if (datetime.datetime.now()-last_heartbeat) > period: + period = datetime.timedelta(minutes=30) + while True: + if (datetime.datetime.now() - last_heartbeat) > period: last_heartbeat = datetime.datetime.now() add_event(op=WALAEventOperation.HeartBeat, name="WALA", is_success=True) diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py new file mode 100644 index 0000000..e89608a --- /dev/null +++ b/azurelinuxagent/ga/update.py @@ -0,0 +1,715 @@ +# Windows Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# +import glob +import json +import os +import platform +import re +import shlex +import shutil +import signal +import subprocess +import sys +import time +import zipfile + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.restutil as restutil +import azurelinuxagent.common.utils.textutil as textutil + +from azurelinuxagent.common.event import add_event, WALAEventOperation +from azurelinuxagent.common.exception import UpdateError, ProtocolError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion +from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, AGENT_LONG_VERSION, \ + AGENT_DIR_GLOB, AGENT_PKG_GLOB, \ + AGENT_PATTERN, AGENT_NAME_PATTERN, AGENT_DIR_PATTERN, \ + CURRENT_AGENT, CURRENT_VERSION, \ + is_current_agent_installed + +from azurelinuxagent.ga.exthandlers import HandlerManifest + + +AGENT_ERROR_FILE = "error.json" # File name for agent error record +AGENT_MANIFEST_FILE = "HandlerManifest.json" + +CHILD_HEALTH_INTERVAL = 15 * 60 +CHILD_LAUNCH_INTERVAL = 5 * 60 +CHILD_LAUNCH_RESTART_MAX = 3 +CHILD_POLL_INTERVAL = 60 + +MAX_FAILURE = 3 # Max failure allowed for agent before blacklisted + +GOAL_STATE_INTERVAL = 25 +REPORT_STATUS_INTERVAL = 15 +RETAIN_INTERVAL = 24 * 60 * 60 # Retain interval for black list + + +def get_update_handler(): + return UpdateHandler() + + +def get_python_cmd(): + major_version = platform.python_version_tuple()[0] + return "python" if int(major_version) <= 2 else "python{0}".format(major_version) + + +class UpdateHandler(object): + + def __init__(self): + self.osutil = get_osutil() + self.protocol_util = get_protocol_util() + + self.running = True + self.last_etag = None + self.last_attempt_time = None + + self.agents = [] + + self.child_agent = None + self.child_launch_time = None + self.child_launch_attempts = 0 + self.child_process = None + + 
self.signal_handler = None + return + + def run_latest(self): + """ + This method is called from the daemon to find and launch the most + current, downloaded agent. + + Note: + - Most events should be tagged to the launched agent (agent_version) + """ + + if self.child_process is not None: + raise Exception("Illegal attempt to launch multiple goal state Agent processes") + + if self.signal_handler is None: + self.signal_handler = signal.signal(signal.SIGTERM, self.forward_signal) + + latest_agent = self.get_latest_agent() + if latest_agent is None: + logger.info(u"Installed Agent {0} is the most current agent", CURRENT_AGENT) + agent_cmd = "python -u {0} -run-exthandlers".format(sys.argv[0]) + agent_dir = os.getcwd() + agent_name = CURRENT_AGENT + agent_version = CURRENT_VERSION + else: + logger.info(u"Determined Agent {0} to be the latest agent", latest_agent.name) + agent_cmd = latest_agent.get_agent_cmd() + agent_dir = latest_agent.get_agent_dir() + agent_name = latest_agent.name + agent_version = latest_agent.version + + try: + + # Launch the correct Python version for python-based agents + cmds = shlex.split(agent_cmd) + if cmds[0].lower() == "python": + cmds[0] = get_python_cmd() + agent_cmd = " ".join(cmds) + + self._evaluate_agent_health(latest_agent) + + self.child_process = subprocess.Popen( + cmds, + cwd=agent_dir, + stdout=sys.stdout, + stderr=sys.stderr) + + logger.info(u"Agent {0} launched with command '{1}'", agent_name, agent_cmd) + + ret = None + start_time = time.time() + while (time.time() - start_time) < CHILD_HEALTH_INTERVAL: + time.sleep(CHILD_POLL_INTERVAL) + ret = self.child_process.poll() + if ret is not None: + break + + if ret is None or ret <= 0: + msg = u"Agent {0} launched with command '{1}' is successfully running".format( + agent_name, + agent_cmd) + logger.info(msg) + add_event( + AGENT_NAME, + version=agent_version, + op=WALAEventOperation.Enable, + is_success=True, + message=msg) + + if ret is None: + ret = self.child_process.wait() + + else: + msg = u"Agent {0} launched with command '{1}' failed with return code: {2}".format( + agent_name, + agent_cmd, + ret) + logger.warn(msg) + add_event( + AGENT_NAME, + version=agent_version, + op=WALAEventOperation.Enable, + is_success=False, + message=msg) + + if ret is not None and ret > 0: + msg = u"Agent {0} launched with command '{1}' returned code: {2}".format( + agent_name, + agent_cmd, + ret) + logger.warn(msg) + if latest_agent is not None: + latest_agent.mark_failure() + + except Exception as e: + msg = u"Agent {0} launched with command '{1}' failed with exception: {2}".format( + agent_name, + agent_cmd, + ustr(e)) + logger.warn(msg) + add_event( + AGENT_NAME, + version=agent_version, + op=WALAEventOperation.Enable, + is_success=False, + message=msg) + if latest_agent is not None: + latest_agent.mark_failure(is_fatal=True) + + self.child_process = None + return + + def run(self): + """ + This is the main loop which watches for agent and extension updates. 
+ """ + + logger.info(u"Agent {0} is running as the goal state agent", CURRENT_AGENT) + + # Launch monitoring threads + from azurelinuxagent.ga.monitor import get_monitor_handler + get_monitor_handler().run() + + from azurelinuxagent.ga.env import get_env_handler + get_env_handler().run() + + from azurelinuxagent.ga.exthandlers import get_exthandlers_handler + exthandlers_handler = get_exthandlers_handler() + + # TODO: Add means to stop running + try: + while self.running: + if self._ensure_latest_agent(): + if len(self.agents) > 0: + logger.info( + u"Agent {0} discovered {1} as an update and will exit", + CURRENT_AGENT, + self.agents[0].name) + break + + exthandlers_handler.run() + + time.sleep(25) + + except Exception as e: + logger.warn(u"Agent {0} failed with exception: {1}", CURRENT_AGENT, ustr(e)) + sys.exit(1) + + sys.exit(0) + return + + def forward_signal(self, signum, frame): + if self.child_process is None: + return + + logger.info( + u"Agent {0} forwarding signal {1} to {2}", + CURRENT_AGENT, + signum, + self.child_agent.name if self.child_agent is not None else CURRENT_AGENT) + self.child_process.send_signal(signum) + + if self.signal_handler not in (None, signal.SIG_IGN, signal.SIG_DFL): + self.signal_handler(signum, frame) + elif self.signal_handler is signal.SIG_DFL: + if signum == signal.SIGTERM: + sys.exit(0) + return + + def get_latest_agent(self): + """ + If autoupdate is enabled, return the most current, downloaded, + non-blacklisted agent (if any). + Otherwise, return None (implying to use the installed agent). + """ + + if not conf.get_autoupdate_enabled(): + return None + + self._load_agents() + available_agents = [agent for agent in self.agents if agent.is_available] + return available_agents[0] if len(available_agents) >= 1 else None + + def _ensure_latest_agent(self, base_version=CURRENT_VERSION): + # Ignore new agents if updating is disabled + if not conf.get_autoupdate_enabled(): + return False + + now = time.time() + if self.last_attempt_time is not None: + next_attempt_time = self.last_attempt_time + conf.get_autoupdate_frequency() + else: + next_attempt_time = now + if next_attempt_time > now: + return False + + family = conf.get_autoupdate_gafamily() + logger.info("Checking for agent family {0} updates", family) + + self.last_attempt_time = now + try: + protocol = self.protocol_util.get_protocol() + manifest_list, etag = protocol.get_vmagent_manifests() + except Exception as e: + msg = u"Exception retrieving agent manifests: {0}".format(ustr(e)) + logger.warn(msg) + add_event( + AGENT_NAME, + op=WALAEventOperation.Download, + version=CURRENT_VERSION, + is_success=False, + message=msg) + return False + + if self.last_etag is not None and self.last_etag == etag: + logger.info(u"Incarnation {0} has no agent updates", etag) + return False + + manifests = [m for m in manifest_list.vmAgentManifests if m.family == family] + if len(manifests) == 0: + logger.info(u"Incarnation {0} has no agent family {1} updates", etag, family) + return False + + try: + pkg_list = protocol.get_vmagent_pkgs(manifests[0]) + except ProtocolError as e: + msg= u"Incarnation {0} failed to get {1} package list: {2}".format( + etag, + family, + ustr(e)) + logger.warn(msg) + add_event( + AGENT_NAME, + op=WALAEventOperation.Download, + version=CURRENT_VERSION, + is_success=False, + message=msg) + return False + + # Set the agents to those available for download at least as current as the existing agent + # and remove from disk any agent no longer reported to the VM. 
+ # Note: + # The code leaves on disk available, but blacklisted, agents so as to preserve the state. + # Otherwise, those agents could be again downloaded and inappropriately retried. + self._set_agents([GuestAgent(pkg=pkg) for pkg in pkg_list.versions]) + self._purge_agents() + self._filter_blacklisted_agents() + + # Return True if agents more recent than the current are available + return len(self.agents) > 0 and self.agents[0].version > base_version + + def _evaluate_agent_health(self, latest_agent): + """ + Evaluate the health of the selected agent: If it is restarting + too frequently, raise an Exception to force blacklisting. + """ + if latest_agent is None: + self.child_agent = None + return + + if self.child_agent is None or latest_agent.version != self.child_agent.version: + self.child_agent = latest_agent + self.child_launch_time = None + self.child_launch_attempts = 0 + + if self.child_launch_time is None: + self.child_launch_time = time.time() + + self.child_launch_attempts += 1 + + if (time.time() - self.child_launch_time) <= CHILD_LAUNCH_INTERVAL \ + and self.child_launch_attempts >= CHILD_LAUNCH_RESTART_MAX: + msg = u"Agent {0} restarted more than {1} times in {2} seconds".format( + self.child_agent.name, + CHILD_LAUNCH_RESTART_MAX, + CHILD_LAUNCH_INTERVAL) + raise Exception(msg) + return + + def _filter_blacklisted_agents(self): + self.agents = [agent for agent in self.agents if not agent.is_blacklisted] + return + + def _load_agents(self): + """ + Load all non-blacklisted agents currently on disk. + """ + if len(self.agents) <= 0: + try: + path = os.path.join(conf.get_lib_dir(), "{0}-*".format(AGENT_NAME)) + self._set_agents([GuestAgent(path=agent_dir) + for agent_dir in glob.iglob(path) if os.path.isdir(agent_dir)]) + self._filter_blacklisted_agents() + except Exception as e: + logger.warn(u"Exception occurred loading available agents: {0}", ustr(e)) + return + + def _purge_agents(self): + """ + Remove from disk all directories and .zip files of unknown agents + (without removing the current, running agent). 
+ """ + path = os.path.join(conf.get_lib_dir(), "{0}-*".format(AGENT_NAME)) + + known_versions = [agent.version for agent in self.agents] + if not is_current_agent_installed() and CURRENT_VERSION not in known_versions: + logger.warn( + u"Running Agent {0} was not found in the agent manifest - adding to list", + CURRENT_VERSION) + known_versions.append(CURRENT_VERSION) + + for agent_path in glob.iglob(path): + try: + name = fileutil.trim_ext(agent_path, "zip") + m = AGENT_DIR_PATTERN.match(name) + if m is not None and FlexibleVersion(m.group(1)) not in known_versions: + if os.path.isfile(agent_path): + logger.info(u"Purging outdated Agent file {0}", agent_path) + os.remove(agent_path) + else: + logger.info(u"Purging outdated Agent directory {0}", agent_path) + shutil.rmtree(agent_path) + except Exception as e: + logger.warn(u"Purging {0} raised exception: {1}", agent_path, ustr(e)) + return + + def _set_agents(self, agents=[]): + self.agents = agents + self.agents.sort(key=lambda agent: agent.version, reverse=True) + return + + +class GuestAgent(object): + def __init__(self, path=None, pkg=None): + self.pkg = pkg + version = None + if path is not None: + m = AGENT_DIR_PATTERN.match(path) + if m == None: + raise UpdateError(u"Illegal agent directory: {0}".format(path)) + version = m.group(1) + elif self.pkg is not None: + version = pkg.version + + if version == None: + raise UpdateError(u"Illegal agent version: {0}".format(version)) + self.version = FlexibleVersion(version) + + location = u"disk" if path is not None else u"package" + logger.info(u"Instantiating Agent {0} from {1}", self.name, location) + + self.error = None + self._load_error() + self._ensure_downloaded() + return + + @property + def name(self): + return "{0}-{1}".format(AGENT_NAME, self.version) + + def get_agent_cmd(self): + return self.manifest.get_enable_command() + + def get_agent_dir(self): + return os.path.join(conf.get_lib_dir(), self.name) + + def get_agent_error_file(self): + return os.path.join(conf.get_lib_dir(), self.name, AGENT_ERROR_FILE) + + def get_agent_manifest_path(self): + return os.path.join(self.get_agent_dir(), AGENT_MANIFEST_FILE) + + def get_agent_pkg_path(self): + return ".".join((os.path.join(conf.get_lib_dir(), self.name), "zip")) + + def clear_error(self): + self.error.clear() + return + + @property + def is_available(self): + return self.is_downloaded and not self.is_blacklisted + + @property + def is_blacklisted(self): + return self.error is not None and self.error.is_blacklisted + + @property + def is_downloaded(self): + return self.is_blacklisted or os.path.isfile(self.get_agent_manifest_path()) + + def mark_failure(self, is_fatal=False): + try: + if not os.path.isdir(self.get_agent_dir()): + os.makedirs(self.get_agent_dir()) + self.error.mark_failure(is_fatal=is_fatal) + self.error.save() + if is_fatal: + logger.warn(u"Agent {0} is permanently blacklisted", self.name) + except Exception as e: + logger.warn(u"Agent {0} failed recording error state: {1}", self.name, ustr(e)) + return + + def _ensure_downloaded(self): + try: + logger.info(u"Ensuring Agent {0} is downloaded", self.name) + + if self.is_blacklisted: + logger.info(u"Agent {0} is blacklisted - skipping download", self.name) + return + + if self.is_downloaded: + logger.info(u"Agent {0} was previously downloaded - skipping download", self.name) + self._load_manifest() + return + + if self.pkg is None: + raise UpdateError(u"Agent {0} is missing package and download URIs".format( + self.name)) + + self._download() + self._unpack() + 
self._load_manifest() + self._load_error() + + msg = u"Agent {0} downloaded successfully".format(self.name) + logger.info(msg) + add_event( + AGENT_NAME, + version=self.version, + op=WALAEventOperation.Install, + is_success=True, + message=msg) + + except Exception as e: + # Note the failure, blacklist the agent if the package downloaded + # - An exception with a downloaded package indicates the package + # is corrupt (e.g., missing the HandlerManifest.json file) + self.mark_failure(is_fatal=os.path.isfile(self.get_agent_pkg_path())) + + msg = u"Agent {0} download failed with exception: {1}".format(self.name, ustr(e)) + logger.warn(msg) + add_event( + AGENT_NAME, + version=self.version, + op=WALAEventOperation.Install, + is_success=False, + message=msg) + return + + def _download(self): + package = None + + for uri in self.pkg.uris: + try: + resp = restutil.http_get(uri.uri, chk_proxy=True) + if resp.status == restutil.httpclient.OK: + package = resp.read() + fileutil.write_file(self.get_agent_pkg_path(), bytearray(package), asbin=True) + logger.info(u"Agent {0} downloaded from {1}", self.name, uri.uri) + break + except restutil.HttpError as e: + logger.warn(u"Agent {0} download from {1} failed", self.name, uri.uri) + + if not os.path.isfile(self.get_agent_pkg_path()): + msg = u"Unable to download Agent {0} from any URI".format(self.name) + add_event( + AGENT_NAME, + op=WALAEventOperation.Download, + version=CURRENT_VERSION, + is_success=False, + message=msg) + raise UpdateError(msg) + return + + def _load_error(self): + try: + if self.error is None: + self.error = GuestAgentError(self.get_agent_error_file()) + self.error.load() + logger.info(u"Agent {0} error state: {1}", self.name, ustr(self.error)) + except Exception as e: + logger.warn(u"Agent {0} failed loading error state: {1}", self.name, ustr(e)) + return + + def _load_manifest(self): + path = self.get_agent_manifest_path() + if not os.path.isfile(path): + msg = u"Agent {0} is missing the {1} file".format(self.name, AGENT_MANIFEST_FILE) + raise UpdateError(msg) + + with open(path, "r") as manifest_file: + try: + manifests = json.load(manifest_file) + except Exception as e: + msg = u"Agent {0} has a malformed {1}".format(self.name, AGENT_MANIFEST_FILE) + raise UpdateError(msg) + if type(manifests) is list: + if len(manifests) <= 0: + msg = u"Agent {0} has an empty {1}".format(self.name, AGENT_MANIFEST_FILE) + raise UpdateError(msg) + manifest = manifests[0] + else: + manifest = manifests + + try: + self.manifest = HandlerManifest(manifest) + if len(self.manifest.get_enable_command()) <= 0: + raise Exception(u"Manifest is missing the enable command") + except Exception as e: + msg = u"Agent {0} has an illegal {1}: {2}".format( + self.name, + AGENT_MANIFEST_FILE, + ustr(e)) + raise UpdateError(msg) + + logger.info( + u"Agent {0} loaded manifest from {1}", + self.name, + self.get_agent_manifest_path()) + logger.verbose(u"Successfully loaded Agent {0} {1}: {2}", + self.name, + AGENT_MANIFEST_FILE, + ustr(self.manifest.data)) + return + + def _unpack(self): + try: + if os.path.isdir(self.get_agent_dir()): + shutil.rmtree(self.get_agent_dir()) + + zipfile.ZipFile(self.get_agent_pkg_path()).extractall(self.get_agent_dir()) + + except Exception as e: + msg = u"Exception unpacking Agent {0} from {1}: {2}".format( + self.name, + self.get_agent_pkg_path(), + ustr(e)) + raise UpdateError(msg) + + if not os.path.isdir(self.get_agent_dir()): + msg = u"Unpacking Agent {0} failed to create directory {1}".format( + self.name, + self.get_agent_dir()) 
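GuestAgent._download above walks the package URIs in order, keeps the first payload that downloads cleanly, and raises UpdateError only when every URI fails. A rough standalone sketch of that fallback pattern (hypothetical names; urllib stands in for the agent's restutil helper and telemetry) might be:

```
import urllib.request

def download_first_available(uris, dest_path, timeout=30):
    """Try each URI in turn; keep the first payload that downloads cleanly."""
    for uri in uris:
        try:
            with urllib.request.urlopen(uri, timeout=timeout) as resp:
                data = resp.read()
            with open(dest_path, 'wb') as f:
                f.write(data)
            return uri                # remember which mirror worked
        except Exception:
            continue                  # fall through to the next URI
    raise RuntimeError("Unable to download package from any URI")
```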
+ raise UpdateError(msg) + + logger.info( + u"Agent {0} unpacked successfully to {1}", + self.name, + self.get_agent_dir()) + return + + +class GuestAgentError(object): + def __init__(self, path): + if path is None: + raise UpdateError(u"GuestAgentError requires a path") + self.path = path + + self.clear() + self.load() + return + + def mark_failure(self, is_fatal=False): + self.last_failure = time.time() + self.failure_count += 1 + self.was_fatal = is_fatal + return + + def clear(self): + self.last_failure = 0.0 + self.failure_count = 0 + self.was_fatal = False + return + + def clear_old_failure(self): + if self.last_failure <= 0.0: + return + if self.last_failure < (time.time() - RETAIN_INTERVAL): + self.clear() + return + + @property + def is_blacklisted(self): + return self.was_fatal or self.failure_count >= MAX_FAILURE + + def load(self): + if self.path is not None and os.path.isfile(self.path): + with open(self.path, 'r') as f: + self.from_json(json.load(f)) + return + + def save(self): + if os.path.isdir(os.path.dirname(self.path)): + with open(self.path, 'w') as f: + json.dump(self.to_json(), f) + return + + def from_json(self, data): + self.last_failure = max( + self.last_failure, + data.get(u"last_failure", 0.0)) + self.failure_count = max( + self.failure_count, + data.get(u"failure_count", 0)) + self.was_fatal = self.was_fatal or data.get(u"was_fatal", False) + return + + def to_json(self): + data = { + u"last_failure": self.last_failure, + u"failure_count": self.failure_count, + u"was_fatal" : self.was_fatal + } + return data + + def __str__(self): + return "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( + self.last_failure, + self.failure_count, + self.was_fatal) diff --git a/azurelinuxagent/distro/redhat/__init__.py b/azurelinuxagent/pa/__init__.py index d9b82f5..1ea2f38 100644 --- a/azurelinuxagent/distro/redhat/__init__.py +++ b/azurelinuxagent/pa/__init__.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/azurelinuxagent/pa/deprovision/__init__.py b/azurelinuxagent/pa/deprovision/__init__.py new file mode 100644 index 0000000..de77168 --- /dev/null +++ b/azurelinuxagent/pa/deprovision/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
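A side note on the GuestAgentError bookkeeping above: the class only records failures and exposes a blacklist decision; it does not stop the agent by itself. The sketch below mirrors that logic in a standalone form. The real MAX_FAILURE and RETAIN_INTERVAL constants live elsewhere in ga/update.py and are not shown in this hunk, so the values used here are illustrative only.

    import time

    MAX_FAILURE = 3              # illustrative; the real constant is defined in ga/update.py
    RETAIN_INTERVAL = 24 * 3600  # illustrative retention window, in seconds

    class FailureRecord(object):
        """Standalone mirror of GuestAgentError's in-memory state."""
        def __init__(self):
            self.last_failure = 0.0
            self.failure_count = 0
            self.was_fatal = False

        def mark_failure(self, is_fatal=False):
            self.last_failure = time.time()
            self.failure_count += 1
            self.was_fatal = is_fatal

        def clear_old_failure(self):
            # Forget a stale failure so one transient error never blacklists forever.
            if 0.0 < self.last_failure < (time.time() - RETAIN_INTERVAL):
                self.last_failure, self.failure_count, self.was_fatal = 0.0, 0, False

        @property
        def is_blacklisted(self):
            return self.was_fatal or self.failure_count >= MAX_FAILURE

    record = FailureRecord()
    record.mark_failure()                # transient error: not yet blacklisted
    record.mark_failure(is_fatal=True)   # e.g. a corrupt downloaded package
    print(record.is_blacklisted)         # True
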
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.pa.deprovision.factory import get_deprovision_handler + +__all__ = ["get_deprovision_handler"] diff --git a/azurelinuxagent/distro/coreos/deprovision.py b/azurelinuxagent/pa/deprovision/coreos.py index 9642579..079a913 100644 --- a/azurelinuxagent/distro/coreos/deprovision.py +++ b/azurelinuxagent/pa/deprovision/coreos.py @@ -17,12 +17,13 @@ # Requires Python 2.4+ and Openssl 1.0+ # -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.distro.default.deprovision import DeprovisionHandler, DeprovisionAction +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ + DeprovisionAction class CoreOSDeprovisionHandler(DeprovisionHandler): - def __init__(self, distro): - self.distro = distro + def __init__(self): + super(CoreOSDeprovisionHandler, self).__init__() def setup(self, deluser): warnings, actions = super(CoreOSDeprovisionHandler, self).setup(deluser) diff --git a/azurelinuxagent/distro/default/deprovision.py b/azurelinuxagent/pa/deprovision/default.py index 4db4cdc..b570c31 100644 --- a/azurelinuxagent/distro/default/deprovision.py +++ b/azurelinuxagent/pa/deprovision/default.py @@ -17,11 +17,13 @@ # Requires Python 2.4+ and Openssl 1.0+ # -import azurelinuxagent.conf as conf -from azurelinuxagent.exception import ProtocolError -from azurelinuxagent.future import read_input -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.exception import ProtocolError +from azurelinuxagent.common.future import read_input +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.protocol import get_protocol_util class DeprovisionAction(object): def __init__(self, func, args=[], kwargs={}): @@ -33,19 +35,20 @@ class DeprovisionAction(object): self.func(*self.args, **self.kwargs) class DeprovisionHandler(object): - def __init__(self, distro): - self.distro = distro + def __init__(self): + self.osutil = get_osutil() + self.protocol_util = get_protocol_util() def del_root_password(self, warnings, actions): warnings.append("WARNING! root password will be disabled. " "You will not be able to login as root.") - actions.append(DeprovisionAction(self.distro.osutil.del_root_password)) + actions.append(DeprovisionAction(self.osutil.del_root_password)) def del_user(self, warnings, actions): try: - ovfenv = self.distro.protocol_util.get_ovf_env() + ovfenv = self.protocol_util.get_ovf_env() except ProtocolError: warnings.append("WARNING! ovf-env.xml is not found.") warnings.append("WARNING! Skip delete user.") @@ -54,7 +57,7 @@ class DeprovisionHandler(object): username = ovfenv.username warnings.append(("WARNING! {0} account and entire home directory " "will be deleted.").format(username)) - actions.append(DeprovisionAction(self.distro.osutil.del_account, + actions.append(DeprovisionAction(self.osutil.del_account, [username])) @@ -65,7 +68,7 @@ class DeprovisionHandler(object): def stop_agent_service(self, warnings, actions): warnings.append("WARNING! 
The waagent service will be stopped.") - actions.append(DeprovisionAction(self.distro.osutil.stop_agent_service)) + actions.append(DeprovisionAction(self.osutil.stop_agent_service)) def del_files(self, warnings, actions): files_to_del = ['/root/.bash_history', '/var/log/waagent.log'] @@ -76,15 +79,18 @@ class DeprovisionHandler(object): dirs_to_del = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"] actions.append(DeprovisionAction(fileutil.rm_dirs, dirs_to_del)) + # For Freebsd + actions.append(DeprovisionAction(fileutil.rm_files, ["/var/db/dhclient.leases.hn0"])) + def del_lib_dir(self, warnings, actions): dirs_to_del = [conf.get_lib_dir()] actions.append(DeprovisionAction(fileutil.rm_dirs, dirs_to_del)) def reset_hostname(self, warnings, actions): localhost = ["localhost.localdomain"] - actions.append(DeprovisionAction(self.distro.osutil.set_hostname, + actions.append(DeprovisionAction(self.osutil.set_hostname, localhost)) - actions.append(DeprovisionAction(self.distro.osutil.set_dhcp_hostname, + actions.append(DeprovisionAction(self.osutil.set_dhcp_hostname, localhost)) def setup(self, deluser): diff --git a/azurelinuxagent/pa/deprovision/factory.py b/azurelinuxagent/pa/deprovision/factory.py new file mode 100644 index 0000000..dd01633 --- /dev/null +++ b/azurelinuxagent/pa/deprovision/factory.py @@ -0,0 +1,36 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
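Worth noting about the deprovision flow above: each step only appends a warning string plus a DeprovisionAction wrapping a callable, and nothing destructive happens until the caller later walks the action list. Below is a minimal standalone illustration of that collect-then-invoke pattern; the rm_files function is a hypothetical stand-in, not the agent's fileutil helper.

    class Action(object):
        """Deferred call, shaped like DeprovisionAction(func, args, kwargs)."""
        def __init__(self, func, args=None, kwargs=None):
            self.func = func
            self.args = args or []
            self.kwargs = kwargs or {}

        def invoke(self):
            self.func(*self.args, **self.kwargs)

    def rm_files(paths):                       # stand-in for fileutil.rm_files
        print("would delete: {0}".format(", ".join(paths)))

    warnings, actions = [], []
    warnings.append("WARNING! /var/log/waagent.log will be deleted.")
    actions.append(Action(rm_files, [["/var/log/waagent.log"]]))

    for warning in warnings:                   # show everything first,
        print(warning)
    for action in actions:                     # then (after confirmation) execute
        action.invoke()
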
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.utils.textutil import Version +from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ + DISTRO_FULL_NAME + +from .default import DeprovisionHandler +from .coreos import CoreOSDeprovisionHandler +from .ubuntu import UbuntuDeprovisionHandler + +def get_deprovision_handler(distro_name=DISTRO_NAME, + distro_version=DISTRO_VERSION, + distro_full_name=DISTRO_FULL_NAME): + if distro_name == "ubuntu": + return UbuntuDeprovisionHandler() + if distro_name == "coreos": + return CoreOSDeprovisionHandler() + + return DeprovisionHandler() + diff --git a/azurelinuxagent/distro/ubuntu/deprovision.py b/azurelinuxagent/pa/deprovision/ubuntu.py index da6e834..14f90de 100644 --- a/azurelinuxagent/distro/ubuntu/deprovision.py +++ b/azurelinuxagent/pa/deprovision/ubuntu.py @@ -18,9 +18,10 @@ # import os -import azurelinuxagent.logger as logger -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.distro.default.deprovision import DeprovisionHandler, DeprovisionAction +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ + DeprovisionAction def del_resolv(): if os.path.realpath('/etc/resolv.conf') != '/run/resolvconf/resolv.conf': @@ -33,8 +34,8 @@ def del_resolv(): class UbuntuDeprovisionHandler(DeprovisionHandler): - def __init__(self, distro): - super(UbuntuDeprovisionHandler, self).__init__(distro) + def __init__(self): + super(UbuntuDeprovisionHandler, self).__init__() def setup(self, deluser): warnings, actions = super(UbuntuDeprovisionHandler, self).setup(deluser) diff --git a/azurelinuxagent/pa/provision/__init__.py b/azurelinuxagent/pa/provision/__init__.py new file mode 100644 index 0000000..05f75ae --- /dev/null +++ b/azurelinuxagent/pa/provision/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
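A quick usage note on the deprovision factory introduced above, assuming the 2.1.5 package layout is importable: callers normally rely on the defaults detected in common/version.py, but a specific handler can be forced by passing distro_name explicitly.

    from azurelinuxagent.pa.deprovision import get_deprovision_handler

    # Defaults come from DISTRO_NAME/DISTRO_VERSION/DISTRO_FULL_NAME; overriding
    # distro_name selects the handler without patching distro detection.
    handler = get_deprovision_handler(distro_name="ubuntu")
    print(type(handler).__name__)   # UbuntuDeprovisionHandler
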
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.pa.provision.factory import get_provision_handler diff --git a/azurelinuxagent/distro/default/provision.py b/azurelinuxagent/pa/provision/default.py index 695b82a..b07c147 100644 --- a/azurelinuxagent/distro/default/provision.py +++ b/azurelinuxagent/pa/provision/default.py @@ -20,21 +20,25 @@ Provision handler """ import os -import azurelinuxagent.logger as logger -from azurelinuxagent.future import ustr -import azurelinuxagent.conf as conf -from azurelinuxagent.event import add_event, WALAEventOperation -from azurelinuxagent.exception import ProvisionError, ProtocolError, OSUtilError -from azurelinuxagent.protocol.restapi import ProvisionStatus -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.fileutil as fileutil +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.event import add_event, WALAEventOperation +from azurelinuxagent.common.exception import ProvisionError, ProtocolError, \ + OSUtilError +from azurelinuxagent.common.protocol.restapi import ProvisionStatus +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.protocol import get_protocol_util CUSTOM_DATA_FILE="CustomData" class ProvisionHandler(object): - def __init__(self, distro): - self.distro = distro + def __init__(self): + self.osutil = get_osutil() + self.protocol_util = get_protocol_util() def run(self): #If provision is not enabled, return @@ -49,12 +53,12 @@ class ProvisionHandler(object): logger.info("Run provision handler.") logger.info("Copy ovf-env.xml.") try: - ovfenv = self.distro.protocol_util.copy_ovf_env() + ovfenv = self.protocol_util.copy_ovf_env() except ProtocolError as e: self.report_event("Failed to copy ovf-env.xml: {0}".format(e)) return - self.distro.protocol_util.detect_protocol_by_file() + self.protocol_util.get_protocol_by_file() self.report_not_ready("Provisioning", "Starting") @@ -95,50 +99,50 @@ class ProvisionHandler(object): logger.info("Handle ovf-env.xml.") try: logger.info("Set host name.") - self.distro.osutil.set_hostname(ovfenv.hostname) + self.osutil.set_hostname(ovfenv.hostname) logger.info("Publish host name.") - self.distro.osutil.publish_hostname(ovfenv.hostname) + self.osutil.publish_hostname(ovfenv.hostname) self.config_user_account(ovfenv) self.save_customdata(ovfenv) if conf.get_delete_root_password(): - self.distro.osutil.del_root_password() + self.osutil.del_root_password() except OSUtilError as e: raise ProvisionError("Failed to handle ovf-env.xml: {0}".format(e)) def config_user_account(self, ovfenv): logger.info("Create user account if not exists") - self.distro.osutil.useradd(ovfenv.username) + self.osutil.useradd(ovfenv.username) if ovfenv.user_password is not None: logger.info("Set user password.") crypt_id = conf.get_password_cryptid() salt_len = conf.get_password_crypt_salt_len() - self.distro.osutil.chpasswd(ovfenv.username, ovfenv.user_password, + self.osutil.chpasswd(ovfenv.username, ovfenv.user_password, crypt_id=crypt_id, salt_len=salt_len) logger.info("Configure sudoer") - self.distro.osutil.conf_sudoer(ovfenv.username, ovfenv.user_password is None) + self.osutil.conf_sudoer(ovfenv.username, nopasswd=ovfenv.user_password is None) logger.info("Configure sshd") - 
self.distro.osutil.conf_sshd(ovfenv.disable_ssh_password_auth) + self.osutil.conf_sshd(ovfenv.disable_ssh_password_auth) #Disable selinux temporary - sel = self.distro.osutil.is_selinux_enforcing() + sel = self.osutil.is_selinux_enforcing() if sel: - self.distro.osutil.set_selinux_enforce(0) + self.osutil.set_selinux_enforce(0) self.deploy_ssh_pubkeys(ovfenv) self.deploy_ssh_keypairs(ovfenv) if sel: - self.distro.osutil.set_selinux_enforce(1) + self.osutil.set_selinux_enforce(1) - self.distro.osutil.restart_ssh_service() + self.osutil.restart_ssh_service() def save_customdata(self, ovfenv): customdata = ovfenv.customdata @@ -148,7 +152,8 @@ class ProvisionHandler(object): logger.info("Save custom data") lib_dir = conf.get_lib_dir() if conf.get_decode_customdata(): - customdata= self.distro.osutil.decode_customdata(customdata) + customdata= self.osutil.decode_customdata(customdata) + customdata_file = os.path.join(lib_dir, CUSTOM_DATA_FILE) fileutil.write_file(customdata_file, customdata) @@ -160,12 +165,12 @@ class ProvisionHandler(object): def deploy_ssh_pubkeys(self, ovfenv): for pubkey in ovfenv.ssh_pubkeys: logger.info("Deploy ssh public key.") - self.distro.osutil.deploy_ssh_pubkey(ovfenv.username, pubkey) + self.osutil.deploy_ssh_pubkey(ovfenv.username, pubkey) def deploy_ssh_keypairs(self, ovfenv): for keypair in ovfenv.ssh_keypairs: logger.info("Deploy ssh key pairs.") - self.distro.osutil.deploy_ssh_keypair(ovfenv.username, keypair) + self.osutil.deploy_ssh_keypair(ovfenv.username, keypair) def report_event(self, message, is_success=False): add_event(name="WALA", message=message, is_success=is_success, @@ -175,7 +180,7 @@ class ProvisionHandler(object): status = ProvisionStatus(status="NotReady", subStatus=sub_status, description=description) try: - protocol = self.distro.protocol_util.get_protocol() + protocol = self.protocol_util.get_protocol() protocol.report_provision_status(status) except ProtocolError as e: self.report_event(ustr(e)) @@ -184,7 +189,7 @@ class ProvisionHandler(object): status = ProvisionStatus(status="Ready") status.properties.certificateThumbprint = thumbprint try: - protocol = self.distro.protocol_util.get_protocol() + protocol = self.protocol_util.get_protocol() protocol.report_provision_status(status) except ProtocolError as e: self.report_event(ustr(e)) diff --git a/azurelinuxagent/distro/suse/distro.py b/azurelinuxagent/pa/provision/factory.py index 5b39369..9bbe35c 100644 --- a/azurelinuxagent/distro/suse/distro.py +++ b/azurelinuxagent/pa/provision/factory.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -17,16 +15,18 @@ # Requires Python 2.4+ and Openssl 1.0+ # -from azurelinuxagent.distro.default.distro import DefaultDistro -from azurelinuxagent.distro.suse.osutil import SUSE11OSUtil, SUSEOSUtil +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.utils.textutil import Version +from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ + DISTRO_FULL_NAME +from .default import ProvisionHandler +from .ubuntu import UbuntuProvisionHandler -class SUSE11Distro(DefaultDistro): - def __init__(self): - super(SUSE11Distro, self).__init__() - self.osutil = SUSE11OSUtil() +def get_provision_handler(distro_name=DISTRO_NAME, + distro_version=DISTRO_VERSION, + distro_full_name=DISTRO_FULL_NAME): + if distro_name == "ubuntu": + return UbuntuProvisionHandler() -class SUSEDistro(DefaultDistro): - def __init__(self): 
- super(SUSEDistro, self).__init__() - self.osutil = SUSEOSUtil() + return ProvisionHandler() diff --git a/azurelinuxagent/distro/ubuntu/provision.py b/azurelinuxagent/pa/provision/ubuntu.py index 330e057..c334f23 100644 --- a/azurelinuxagent/distro/ubuntu/provision.py +++ b/azurelinuxagent/pa/provision/ubuntu.py @@ -19,22 +19,22 @@ import os import time -import azurelinuxagent.logger as logger -from azurelinuxagent.future import ustr -import azurelinuxagent.conf as conf -import azurelinuxagent.protocol.ovfenv as ovfenv -from azurelinuxagent.event import add_event, WALAEventOperation -from azurelinuxagent.exception import ProvisionError, ProtocolError -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.distro.default.provision import ProvisionHandler +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.protocol.ovfenv as ovfenv +from azurelinuxagent.common.event import add_event, WALAEventOperation +from azurelinuxagent.common.exception import ProvisionError, ProtocolError +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.pa.provision.default import ProvisionHandler """ On ubuntu image, provision could be disabled. """ class UbuntuProvisionHandler(ProvisionHandler): - def __init__(self, distro): - self.distro = distro + def __init__(self): + super(UbuntuProvisionHandler, self).__init__() def run(self): #If provision is enabled, run default provision handler @@ -50,7 +50,7 @@ class UbuntuProvisionHandler(ProvisionHandler): logger.info("Waiting cloud-init to copy ovf-env.xml.") self.wait_for_ovfenv() - protocol = self.distro.protocol_util.detect_protocol() + protocol = self.protocol_util.get_protocol() self.report_not_ready("Provisioning", "Starting") logger.info("Sleep 15 seconds to prevent throttling") time.sleep(15) #Sleep to prevent throttling @@ -75,7 +75,7 @@ class UbuntuProvisionHandler(ProvisionHandler): """ for retry in range(0, max_retry): try: - self.distro.protocol_util.get_ovf_env() + self.protocol_util.get_ovf_env() return except ProtocolError: if retry < max_retry - 1: @@ -87,12 +87,12 @@ class UbuntuProvisionHandler(ProvisionHandler): """ Wait for cloud-init to generate ssh host key """ - kepair_type = conf.get_ssh_host_keypair_type() - path = '/etc/ssh/ssh_host_{0}_key'.format(kepair_type) + keypair_type = conf.get_ssh_host_keypair_type() + path = '/etc/ssh/ssh_host_{0}_key.pub'.format(keypair_type) for retry in range(0, max_retry): if os.path.isfile(path): - return self.get_ssh_host_key_thumbprint(kepair_type) + return self.get_ssh_host_key_thumbprint(keypair_type) if retry < max_retry - 1: logger.info("Wait for ssh host key be generated: {0}", path) time.sleep(5) - raise ProvisionError("Ssh hsot key is not generated.") + raise ProvisionError("ssh host key is not generated.") diff --git a/azurelinuxagent/pa/rdma/__init__.py b/azurelinuxagent/pa/rdma/__init__.py new file mode 100644 index 0000000..dff0ba4 --- /dev/null +++ b/azurelinuxagent/pa/rdma/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2016 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
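The Ubuntu provisioning path above reduces to polling for the public half of the host key that cloud-init writes. A self-contained sketch of that wait loop follows; the keypair type, retry count, and sleep interval are illustrative (the agent takes them from waagent.conf and hard-coded defaults).

    import os
    import time

    def wait_for_ssh_host_key(keypair_type="rsa", max_retry=6, interval=5):
        # cloud-init writes /etc/ssh/ssh_host_<type>_key.pub once key generation finishes
        path = "/etc/ssh/ssh_host_{0}_key.pub".format(keypair_type)
        for retry in range(max_retry):
            if os.path.isfile(path):
                return path
            if retry < max_retry - 1:
                time.sleep(interval)
        raise RuntimeError("ssh host key was not generated: {0}".format(path))
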
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.pa.rdma.factory import get_rdma_handler diff --git a/azurelinuxagent/pa/rdma/centos.py b/azurelinuxagent/pa/rdma/centos.py new file mode 100644 index 0000000..c527e1b --- /dev/null +++ b/azurelinuxagent/pa/rdma/centos.py @@ -0,0 +1,203 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import glob +import os +import re +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.rdma import RDMAHandler + + +class CentOSRDMAHandler(RDMAHandler): + rdma_user_mode_package_name = 'microsoft-hyper-v-rdma' + rdma_kernel_mode_package_name = 'kmod-microsoft-hyper-v-rdma' + rdma_wrapper_package_name = 'msft-rdma-drivers' + + hyper_v_package_name = "hypervkvpd" + hyper_v_package_name_new = "microsoft-hyper-v" + + version_major = None + version_minor = None + + def __init__(self, distro_version): + v = distro_version.split('.') + if len(v) < 2: + raise Exception('Unexpected centos version: %s' % distro_version) + self.version_major, self.version_minor = v[0], v[1] + + def install_driver(self): + """ + Install the KVP daemon and the appropriate RDMA driver package for the + RDMA firmware. 
+ """ + fw_version = RDMAHandler.get_rdma_version() + if not fw_version: + raise Exception('Cannot determine RDMA firmware version') + logger.info("RDMA: found firmware version: {0}".format(fw_version)) + fw_version = self.get_int_rdma_version(fw_version) + installed_pkg = self.get_rdma_package_info() + if installed_pkg: + logger.info( + 'RDMA: driver package present: {0}'.format(installed_pkg)) + if self.is_rdma_package_up_to_date(installed_pkg, fw_version): + logger.info('RDMA: driver package is up-to-date') + return + else: + logger.info('RDMA: driver package needs updating') + self.update_rdma_package(fw_version) + else: + logger.info('RDMA: driver package is NOT installed') + self.update_rdma_package(fw_version) + + def is_rdma_package_up_to_date(self, pkg, fw_version): + # Example match (pkg name, -, followed by 3 segments, fw_version and -): + # - pkg=microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64 + # - fw_version=142 + pattern = '{0}-\d\.\d\.\d\.({1})-'.format( + self.rdma_user_mode_package_name, fw_version) + return re.match(pattern, pkg) + + @staticmethod + def get_int_rdma_version(version): + s = version.split('.') + if len(s) == 0: + raise Exception('Unexpected RDMA firmware version: "%s"' % version) + return s[0] + + def get_rdma_package_info(self): + """ + Returns the installed rdma package name or None + """ + ret, output = shellutil.run_get_output( + 'rpm -q %s' % self.rdma_user_mode_package_name, chk_err=False) + if ret != 0: + return None + return output + + def update_rdma_package(self, fw_version): + logger.info("RDMA: updating RDMA packages") + self.refresh_repos() + self.force_install_package(self.rdma_wrapper_package_name) + self.install_rdma_drivers(fw_version) + + def force_install_package(self, pkg_name): + """ + Attempts to remove existing package and installs the package + """ + logger.info('RDMA: Force installing package: %s' % pkg_name) + if self.uninstall_package(pkg_name) != 0: + logger.info('RDMA: Erasing package failed but will continue') + if self.install_package(pkg_name) != 0: + raise Exception('Failed to install package "{0}"'.format(pkg_name)) + logger.info('RDMA: installation completed: %s' % pkg_name) + + @staticmethod + def uninstall_package(pkg_name): + return shellutil.run('yum erase -y -q {0}'.format(pkg_name)) + + @staticmethod + def install_package(pkg_name): + return shellutil.run('yum install -y -q {0}'.format(pkg_name)) + + def refresh_repos(self): + logger.info("RDMA: refreshing yum repos") + if shellutil.run('yum clean all') != 0: + raise Exception('Cleaning yum repositories failed') + if shellutil.run('yum updateinfo') != 0: + raise Exception('Failed to act on yum repo update information') + logger.info("RDMA: repositories refreshed") + + def install_rdma_drivers(self, fw_version): + """ + Installs the drivers from /opt/rdma/rhel[Major][Minor] directory, + particularly the microsoft-hyper-v-rdma-* kmod-* and (no debuginfo or + src). Tries to uninstall them first. 
+ """ + pkg_dir = '/opt/microsoft/rdma/rhel{0}{1}'.format( + self.version_major, self.version_minor) + logger.info('RDMA: pkgs dir: {0}'.format(pkg_dir)) + if not os.path.isdir(pkg_dir): + raise Exception('RDMA packages directory %s is missing' % pkg_dir) + + pkgs = os.listdir(pkg_dir) + logger.info('RDMA: found %d files in package directory' % len(pkgs)) + + # Uninstal KVP daemon first (if exists) + self.uninstall_kvp_driver_package_if_exists() + + # Install kernel mode driver (kmod-microsoft-hyper-v-rdma-*) + kmod_pkg = self.get_file_by_pattern( + pkgs, "%s-\d\.\d\.\d\.+(%s)-\d{8}\.x86_64.rpm" % (self.rdma_kernel_mode_package_name, fw_version)) + if not kmod_pkg: + raise Exception("RDMA kernel mode package not found") + kmod_pkg_path = os.path.join(pkg_dir, kmod_pkg) + self.uninstall_pkg_and_install_from( + 'kernel mode', self.rdma_kernel_mode_package_name, kmod_pkg_path) + + # Install user mode driver (microsoft-hyper-v-rdma-*) + umod_pkg = self.get_file_by_pattern( + pkgs, "%s-\d\.\d\.\d\.+(%s)-\d{8}\.x86_64.rpm" % (self.rdma_user_mode_package_name, fw_version)) + if not umod_pkg: + raise Exception("RDMA user mode package not found") + umod_pkg_path = os.path.join(pkg_dir, umod_pkg) + self.uninstall_pkg_and_install_from( + 'user mode', self.rdma_user_mode_package_name, umod_pkg_path) + + logger.info("RDMA: driver packages installed") + self.load_driver_module() + if not self.is_driver_loaded(): + logger.info("RDMA: driver module is not loaded; reboot required") + self.reboot_system() + else: + logger.info("RDMA: kernel module is loaded") + + @staticmethod + def get_file_by_pattern(list, pattern): + for l in list: + if re.match(pattern, l): + return l + return None + + def uninstall_pkg_and_install_from(self, pkg_type, pkg_name, pkg_path): + logger.info( + "RDMA: Processing {0} driver: {1}".format(pkg_type, pkg_path)) + logger.info("RDMA: Try to uninstall existing version: %s" % pkg_name) + if self.uninstall_package(pkg_name) == 0: + logger.info("RDMA: Successfully uninstaled %s" % pkg_name) + logger.info( + "RDMA: Installing {0} package from {1}".format(pkg_type, pkg_path)) + if self.install_package(pkg_path) != 0: + raise Exception( + "Failed to install RDMA {0} package".format(pkg_type)) + + def uninstall_kvp_driver_package_if_exists(self): + kvp_pkgs = [self.hyper_v_package_name, + self.hyper_v_package_name_new] + + for kvp_pkg in kvp_pkgs: + if shellutil.run("rpm -q %s" % kvp_pkg, chk_err=False) != 0: + logger.info( + "RDMA: kvp package %s does not exist, skipping" % kvp_pkg) + else: + logger.info('RDMA: erasing kvp package "%s"' % kvp_pkg) + if shellutil.run("yum erase -q -y %s" % kvp_pkg, chk_err=False) == 0: + logger.info("RDMA: successfully erased package") + else: + logger.error("RDMA: failed to erase package") diff --git a/azurelinuxagent/pa/rdma/factory.py b/azurelinuxagent/pa/rdma/factory.py new file mode 100644 index 0000000..535b3d3 --- /dev/null +++ b/azurelinuxagent/pa/rdma/factory.py @@ -0,0 +1,41 @@ +# Copyright 2016 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
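The up-to-date check in the CentOS handler above is just a regular expression that pins the last numeric segment of the installed package version to the RDMA firmware version. Reusing the example package name from the comment in that hunk:

    import re

    installed = "microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64"

    def up_to_date(pkg, fw_version, pkg_name="microsoft-hyper-v-rdma"):
        # Package name, three dotted version segments, the firmware version, a dash.
        pattern = r"{0}-\d\.\d\.\d\.({1})-".format(pkg_name, fw_version)
        return re.match(pattern, pkg) is not None

    print(up_to_date(installed, "142"))   # True  -> driver already matches the firmware
    print(up_to_date(installed, "140"))   # False -> update_rdma_package() would run
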
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import azurelinuxagent.common.logger as logger + +from azurelinuxagent.common.version import DISTRO_FULL_NAME, DISTRO_VERSION +from azurelinuxagent.common.rdma import RDMAHandler +from .suse import SUSERDMAHandler +from .centos import CentOSRDMAHandler + + +def get_rdma_handler( + distro_full_name=DISTRO_FULL_NAME, + distro_version=DISTRO_VERSION +): + """Return the handler object for RDMA driver handling""" + if ( + distro_full_name == 'SUSE Linux Enterprise Server' and + int(distro_version) > 11 + ): + return SUSERDMAHandler() + + if distro_full_name == 'CentOS Linux' or distro_full_name == 'CentOS': + return CentOSRDMAHandler(distro_version) + + logger.info("No RDMA handler exists for distro='{0}' version='{1}'", distro_full_name, distro_version) + return RDMAHandler() diff --git a/azurelinuxagent/pa/rdma/suse.py b/azurelinuxagent/pa/rdma/suse.py new file mode 100644 index 0000000..f0d8d0f --- /dev/null +++ b/azurelinuxagent/pa/rdma/suse.py @@ -0,0 +1,130 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import glob +import os +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.rdma import RDMAHandler + + +class SUSERDMAHandler(RDMAHandler): + + def install_driver(self): + """Install the appropriate driver package for the RDMA firmware""" + + fw_version = RDMAHandler.get_rdma_version() + if not fw_version: + error_msg = 'RDMA: Could not determine firmware version. ' + error_msg += 'Therefore, no driver will be installed.' + logger.error(error_msg) + return + zypper_install = 'zypper -n in %s' + zypper_remove = 'zypper -n rm %s' + zypper_search = 'zypper se -s %s' + package_name = 'msft-rdma-kmp-default' + cmd = zypper_search % package_name + status, repo_package_info = shellutil.run_get_output(cmd) + driver_package_versions = [] + driver_package_installed = False + for entry in repo_package_info.split('\n'): + if package_name in entry: + sections = entry.split('|') + if len(sections) < 4: + error_msg = 'RDMA: Unexpected output from"%s": "%s"' + logger.error(error_msg % (cmd, entry)) + continue + installed = sections[0].strip() + version = sections[3].strip() + driver_package_versions.append(version) + if fw_version in version and installed == 'i': + info_msg = 'RDMA: Matching driver package "%s-%s" ' + info_msg += 'is already installed, nothing to do.' 
+ logger.info(info_msg % (package_name, version)) + return True + if installed == 'i': + driver_package_installed = True + + # If we get here the driver package is installed but the + # version doesn't match or no package is installed + requires_reboot = False + if driver_package_installed: + # Unloading the particular driver with rmmod does not work + # We have to reboot after the new driver is installed + if self.is_driver_loaded(): + info_msg = 'RDMA: Currently loaded driver does not match the ' + info_msg += 'firmware implementation, reboot will be required.' + logger.info(info_msg) + requires_reboot = True + logger.info("RDMA: removing package %s" % package_name) + cmd = zypper_remove % package_name + shellutil.run(cmd) + logger.info("RDMA: removed package %s" % package_name) + + logger.info("RDMA: looking for fw version %s in packages" % fw_version) + for entry in driver_package_versions: + if not fw_version in version: + logger.info("Package '%s' is not a match." % entry) + else: + logger.info("Package '%s' is a match. Installing." % entry) + complete_name = '%s-%s' % (package_name, version) + cmd = zypper_install % complete_name + result = shellutil.run(cmd) + if result: + error_msg = 'RDMA: Failed install of package "%s" ' + error_msg += 'from available repositories.' + logger.error(error_msg % complete_name) + msg = 'RDMA: Successfully installed "%s" from ' + msg += 'configured repositories' + logger.info(msg % complete_name) + self.load_driver_module() + if requires_reboot: + self.reboot_system() + return True + else: + logger.info("RDMA: No suitable match in repos. Trying local.") + local_packages = glob.glob('/opt/microsoft/rdma/*.rpm') + for local_package in local_packages: + logger.info("Examining: %s" % local_package) + if local_package.endswith('.src.rpm'): + continue + if ( + package_name in local_package and + fw_version in local_package + ): + logger.info("RDMA: Installing: %s" % local_package) + cmd = zypper_install % local_package + result = shellutil.run(cmd) + if result: + error_msg = 'RDMA: Failed install of package "%s" ' + error_msg += 'from local package cache' + logger.error(error_msg % local_package) + break + msg = 'RDMA: Successfully installed "%s" from ' + msg += 'local package cache' + logger.info(msg % (local_package)) + self.load_driver_module() + if requires_reboot: + self.reboot_system() + return True + else: + error_msg = 'Unable to find driver package that matches ' + error_msg += 'RDMA firmware version "%s"' % fw_version + logger.error(error_msg) + return diff --git a/azurelinuxagent/utils/__init__.py b/azurelinuxagent/utils/__init__.py deleted file mode 100644 index d9b82f5..0000000 --- a/azurelinuxagent/utils/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
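On the SUSE handler above: it scrapes the table printed by `zypper se -s`, taking column 0 as the installed flag ('i') and column 3 as the version, and treats a package as matching when the firmware version occurs inside that version string. Here is a standalone sketch of the same parsing; the sample row is illustrative and real zypper output may be formatted differently.

    PACKAGE_NAME = "msft-rdma-kmp-default"
    FW_VERSION = "142"

    # Illustrative 'zypper se -s' row: status | name | kind | version | arch | repo
    sample = "i | msft-rdma-kmp-default | package | 4.1.0.142-1.1 | x86_64 | msft"

    def parse_row(entry):
        if PACKAGE_NAME not in entry:
            return None
        sections = entry.split("|")
        if len(sections) < 4:
            return None                      # unexpected formatting, skip the row
        installed = sections[0].strip() == "i"
        version = sections[3].strip()
        return installed, version

    installed, version = parse_row(sample)
    print(installed and FW_VERSION in version)   # True -> nothing to install
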
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - diff --git a/bin/waagent2.0 b/bin/waagent2.0 index 673a04c..abc4448 100644 --- a/bin/waagent2.0 +++ b/bin/waagent2.0 @@ -228,6 +228,9 @@ class AbstractDistro(object): Returns exit result. """ if self.isSelinuxSystem(): + if not os.path.exists(path): + Error("Path does not exist: {0}".format(path)) + return 1 return Run('chcon ' + cn + ' ' + path) def setHostname(self,name): @@ -2336,7 +2339,10 @@ def ChangeOwner(filepath, user): except: pass if p != None: - os.chown(filepath, p[2], p[3]) + if not os.path.exists(filepath): + Error("Path does not exist: {0}".format(filepath)) + else: + os.chown(filepath, p[2], p[3]) def CreateDir(dirpath, user, mode): """ @@ -4079,7 +4085,7 @@ class ExtensionsConfig(object): try: incarnation=self.Extensions[0].getAttribute("goalStateIncarnation") except: - Error('Error parsing ExtensionsConfig. Unable to send status reports') + Error('Error parsing attribute "goalStateIncarnation". Unable to send status reports') return -1 status='' statuses='' @@ -4117,7 +4123,7 @@ class ExtensionsConfig(object): try: uri=GetNodeTextData(self.Extensions[0].getElementsByTagName("StatusUploadBlob")[0]).replace('&','&') except: - Error('Error parsing ExtensionsConfig. Unable to send status reports') + Error('Error parsing element "StatusUploadBlob". Unable to send status reports') return -1 LogIfVerbose('Status report '+status+' sent to ' + uri) @@ -4607,7 +4613,7 @@ class OvfEnv(object): if len(CDSection) > 0 : self.CustomData=GetNodeTextData(CDSection[0]) if len(self.CustomData)>0: - SetFileContents(LibDir + '/CustomData', bytearray(MyDistro.translateCustomData(self.CustomData))) + SetFileContents(LibDir + '/CustomData', bytearray(MyDistro.translateCustomData(self.CustomData), 'utf-8')) Log('Wrote ' + LibDir + '/CustomData') else : Error('<CustomData> contains no data!') @@ -4912,8 +4918,9 @@ class WALAEventMonitor(WALAEvent): def EventsLoop(self): LastReportHeartBeatTime = datetime.datetime.min try: - while(True): - if (datetime.datetime.now()-LastReportHeartBeatTime) > datetime.timedelta(hours=12): + while True: + if (datetime.datetime.now()-LastReportHeartBeatTime) > \ + datetime.timedelta(minutes=30): LastReportHeartBeatTime = datetime.datetime.now() AddExtensionEvent(op=WALAEventOperation.HeartBeat,name="WALA",isSuccess=True) self.postNumbersInOneLoop=0 diff --git a/config/66-azure-storage.rules b/config/66-azure-storage.rules new file mode 100644 index 0000000..ab30628 --- /dev/null +++ b/config/66-azure-storage.rules @@ -0,0 +1,18 @@ +ACTION!="add|change", GOTO="azure_end" +SUBSYSTEM!="block", GOTO="azure_end" +ATTRS{ID_VENDOR}!="Msft", GOTO="azure_end" +ATTRS{ID_MODEL}!="Virtual_Disk", GOTO="azure_end" + +# Root has a GUID of 0000 as the second value +# The resource/resource has GUID of 0001 as the second value +ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="root", GOTO="azure_names" +ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="resource", GOTO="azure_names" +GOTO="azure_end" + +# Create the symlinks +LABEL="azure_names" +ENV{DEVTYPE}=="disk", SYMLINK+="disk/azure/$env{fabric_name}" +ENV{DEVTYPE}=="partition", SYMLINK+="disk/azure/$env{fabric_name}-part%n" + +LABEL="azure_end" + diff --git a/config/99-azure-product-uuid.rules b/config/99-azure-product-uuid.rules new file mode 100644 index 0000000..a5af9b1 --- /dev/null +++ b/config/99-azure-product-uuid.rules @@ -0,0 +1,9 @@ +SUBSYSTEM!="dmi", GOTO="product_uuid-exit" +ATTR{sys_vendor}!="Microsoft Corporation", GOTO="product_uuid-exit" 
+ATTR{product_name}!="Virtual Machine", GOTO="product_uuid-exit" +TEST!="/sys/devices/virtual/dmi/id/product_uuid", GOTO="product_uuid-exit" + +RUN+="/bin/chmod 0444 /sys/devices/virtual/dmi/id/product_uuid" + +LABEL="product_uuid-exit" + diff --git a/config/coreos/waagent.conf b/config/coreos/waagent.conf index 5af0baf..05095f3 100644 --- a/config/coreos/waagent.conf +++ b/config/coreos/waagent.conf @@ -17,19 +17,19 @@ Role.TopologyConsumer=None Provisioning.Enabled=y # Password authentication for root account will be unavailable. -Provisioning.DeleteRootPassword=y +Provisioning.DeleteRootPassword=n # Generate fresh host key pair. -Provisioning.RegenerateSshHostKeyPair=y +Provisioning.RegenerateSshHostKeyPair=n # Supported values are "rsa", "dsa" and "ecdsa". -Provisioning.SshHostKeyPairType=rsa +Provisioning.SshHostKeyPairType=ed25519 # Monitor host name changes and publish changes via DHCP requests. Provisioning.MonitorHostName=y # Decode CustomData from Base64. -Provisioning.DecodeCustomData=y +Provisioning.DecodeCustomData=n # Execute CustomData after provisioning. Provisioning.ExecuteCustomData=n @@ -59,6 +59,9 @@ ResourceDisk.EnableSwap=n # Size of the swapfile. ResourceDisk.SwapSizeMB=0 +# Respond to load balancer probes if requested by Windows Azure. +LBProbeResponder=y + # Enable verbose logging (y|n) Logs.Verbose=n @@ -89,3 +92,10 @@ OS.OpensslPath=None # # Home.Dir=/home + +# Enable RDMA management and set up, should only be used in HPC images +# OS.EnableRDMA=y + +# Enable or disable self-update, default is enabled +AutoUpdate.Enabled=y +AutoUpdate.GAFamily=Prod diff --git a/config/freebsd/waagent.conf b/config/freebsd/waagent.conf new file mode 100644 index 0000000..907b219 --- /dev/null +++ b/config/freebsd/waagent.conf @@ -0,0 +1,99 @@ +# +# Microsoft Azure Linux Agent Configuration +# + +# Specified program is invoked with the argument "Ready" when we report ready status +# to the endpoint server. +Role.StateConsumer=None + +# Specified program is invoked with XML file argument specifying role +# configuration. +Role.ConfigurationConsumer=None + +# Specified program is invoked with XML file argument specifying role topology. +Role.TopologyConsumer=None + +# Enable instance creation +Provisioning.Enabled=y + +# Password authentication for root account will be unavailable. +Provisioning.DeleteRootPassword=y + +# Generate fresh host key pair. +Provisioning.RegenerateSshHostKeyPair=y + +# Supported values are "rsa", "dsa" and "ecdsa". +Provisioning.SshHostKeyPairType=rsa + +# Monitor host name changes and publish changes via DHCP requests. +Provisioning.MonitorHostName=y + +# Decode CustomData from Base64. +Provisioning.DecodeCustomData=n + +# Execute CustomData after provisioning. +Provisioning.ExecuteCustomData=n + +# Algorithm used by crypt when generating password hash. +#Provisioning.PasswordCryptId=6 + +# Length of random salt used when generating password hash. +#Provisioning.PasswordCryptSaltLength=10 + +# Format if unformatted. If 'n', resource disk will not be mounted. +ResourceDisk.Format=y + +# File system on the resource disk +# Typically ext3 or ext4. FreeBSD images should use 'ufs' here. +ResourceDisk.Filesystem=ufs + +# Mount point for the resource disk +ResourceDisk.MountPoint=/mnt/resource + +# Create and use swapfile on resource disk. +ResourceDisk.EnableSwap=n + +# Size of the swapfile. +ResourceDisk.SwapSizeMB=0 + +# Enable verbose logging (y|n) +Logs.Verbose=n + +# Root device timeout in seconds. 
+OS.RootDeviceScsiTimeout=300 + +# If "None", the system default version is used. +OS.OpensslPath=None + +OS.PasswordPath=/etc/master.passwd + +OS.SudoersDir=/usr/local/etc/sudoers.d + +# If set, agent will use proxy server to access internet +#HttpProxy.Host=None +#HttpProxy.Port=None + +# Detect Scvmm environment, default is n +# DetectScvmmEnv=n + +# +# Lib.Dir=/var/lib/waagent + +# +# DVD.MountPoint=/mnt/cdrom/secure + +# +# Pid.File=/var/run/waagent.pid + +# +# Extension.LogDir=/var/log/azure + +# +# Home.Dir=/home + +# Enable RDMA management and set up, should only be used in HPC images +# OS.EnableRDMA=y + +# Enable or disable self-update, default is enabled +AutoUpdate.Enabled=y +AutoUpdate.GAFamily=Prod diff --git a/config/suse/waagent.conf b/config/suse/waagent.conf index 7cd6d37..8ea3195 100644 --- a/config/suse/waagent.conf +++ b/config/suse/waagent.conf @@ -77,3 +77,10 @@ OS.OpensslPath=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n + +# Enable RDMA management and set up, should only be used in HPC images +# OS.EnableRDMA=y + +# Enable or disable self-update, default is enabled +AutoUpdate.Enabled=y +AutoUpdate.GAFamily=Prod diff --git a/config/ubuntu/waagent.conf b/config/ubuntu/waagent.conf index ee60045..ca68044 100644 --- a/config/ubuntu/waagent.conf +++ b/config/ubuntu/waagent.conf @@ -77,3 +77,10 @@ OS.OpensslPath=None # Detect Scvmm environment, default is n # DetectScvmmEnv=n + +# Enable RDMA management and set up, should only be used in HPC images +# OS.EnableRDMA=y + +# Enable or disable self-update, default is enabled +AutoUpdate.Enabled=y +AutoUpdate.GAFamily=Prod diff --git a/config/waagent.conf b/config/waagent.conf index e808117..c28a651 100644 --- a/config/waagent.conf +++ b/config/waagent.conf @@ -89,3 +89,10 @@ OS.OpensslPath=None # # Home.Dir=/home + +# Enable RDMA management and set up, should only be used in HPC images +# OS.EnableRDMA=y + +# Enable or disable self-update, default is enabled +AutoUpdate.Enabled=y +AutoUpdate.GAFamily=Prod diff --git a/debian/66-azure-storage.rules b/debian/66-azure-storage.rules index 1b64824..461db71 100644..120000 --- a/debian/66-azure-storage.rules +++ b/debian/66-azure-storage.rules @@ -1,17 +1 @@ -ACTION!="add|change", GOTO="azure_end" -SUBSYSTEM!="block", GOTO="azure_end" -ATTRS{ID_VENDOR}!="Msft", GOTO="azure_end" -ATTRS{ID_MODEL}!="Virtual_Disk", GOTO="azure_end" - -# Root has a GUID of 0000 as the second value -# The resource/resource has GUID of 0001 as the second value -ATTRS{device_id}=="?00000000-0000-*", ENV{fabric_name}="root", GOTO="azure_names" -ATTRS{device_id}=="?00000000-0001-*", ENV{fabric_name}="resource", GOTO="azure_names" -GOTO="azure_end" - -# Create the symlinks -LABEL="azure_names" -ENV{DEVTYPE}=="disk", SYMLINK+="disk/azure/$env{fabric_name}" -ENV{DEVTYPE}=="partition", SYMLINK+="disk/azure/$env{fabric_name}-part%n" - -LABEL="azure_end" +../config/66-azure-storage.rules
\ No newline at end of file diff --git a/debian/changelog b/debian/changelog index bdbae20..ed5f944 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,15 @@ +walinuxagent (2.1.5-0ubuntu1) yakkety; urgency=medium + + * New upstream release (LP: #1603581) + - d/patches/disable-auto-update.patch: + - The new version introduces auto-updating of the agent to its latest + version via an internal mechanism; disable this + - d/patches/fix_shebangs.patch: + - Dropped in favour of the dh_python3 --shebang option. + - Refreshed d/patches/disable_udev_overrides.patch + + -- Daniel Watkins <daniel.watkins@canonical.com> Tue, 13 Sep 2016 16:11:47 +0100 + walinuxagent (2.1.3-0ubuntu4) xenial; urgency=medium * Correct ephemeral warning to include the full command for removal diff --git a/debian/control b/debian/control index f8a00bb..3d4e095 100644 --- a/debian/control +++ b/debian/control @@ -21,7 +21,7 @@ Depends: cloud-init (>=0.7.3~bzr826-0ubuntu2), passwd (>=4.1.4.2), util-linux (>=2.0), ${misc:Depends}, - ${python:Depends} + ${python3:Depends} Recommends: linux-image-extra-virtual Description: Windows Azure Linux Agent The Windows Azure Linux Agent supports the provisioning and running of Linux diff --git a/debian/docs b/debian/docs index c401623..ffb38b3 100644 --- a/debian/docs +++ b/debian/docs @@ -1,6 +1,6 @@ debian/99-cloud-init-disable-diskprovisioning.conf NOTICE -LICENSE-2.0.txt +LICENSE.txt README.md MANIFEST Changelog diff --git a/debian/patches/disable-auto-update.patch b/debian/patches/disable-auto-update.patch new file mode 100644 index 0000000..166c6ec --- /dev/null +++ b/debian/patches/disable-auto-update.patch @@ -0,0 +1,9 @@ +--- a/config/ubuntu/waagent.conf ++++ b/config/ubuntu/waagent.conf +@@ -82,5 +82,5 @@ + # OS.EnableRDMA=y + + # Enable or disable self-update, default is enabled +-AutoUpdate.Enabled=y ++AutoUpdate.Enabled=n + AutoUpdate.GAFamily=Prod diff --git a/debian/patches/disable_udev_overrides.patch b/debian/patches/disable_udev_overrides.patch index e0bb607..7c344d0 100644 --- a/debian/patches/disable_udev_overrides.patch +++ b/debian/patches/disable_udev_overrides.patch @@ -10,28 +10,3 @@ VarLibDhcpDirectories = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"] EtcDhcpClientConfFiles = ["/etc/dhcp/dhclient.conf", "/etc/dhcp3/dhclient.conf"] global LibDir ---- a/azurelinuxagent/distro/ubuntu/osutil.py -+++ b/azurelinuxagent/distro/ubuntu/osutil.py -@@ -44,6 +44,12 @@ - def start_agent_service(self): - return shellutil.run("service walinuxagent start", chk_err=False) - -+ def remove_rules_files(self, rules_files=""): -+ pass -+ -+ def restore_rules_files(self, rules_files=""): -+ pass -+ - class Ubuntu12OSUtil(Ubuntu14OSUtil): - def __init__(self): - super(Ubuntu12OSUtil, self).__init__() -@@ -67,9 +73,3 @@ - def __init__(self): - super(UbuntuSnappyOSUtil, self).__init__() - self.conf_file_path = '/apps/walinuxagent/current/waagent.conf' -- -- def remove_rules_files(self, rules_files=""): -- pass -- -- def restore_rules_files(self, rules_files=""): -- pass diff --git a/debian/patches/fix_shebangs.patch b/debian/patches/fix_shebangs.patch deleted file mode 100644 index 828b0f2..0000000 --- a/debian/patches/fix_shebangs.patch +++ /dev/null @@ -1,24 +0,0 @@ ---- a/setup.py -+++ b/setup.py -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3n3n3 - # - # Microsoft Azure Linux Agent setup.py - # ---- a/bin/waagent -+++ b/bin/waagent -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - # - # Azure Linux Agent - # ---- 
a/bin/waagent2.0 -+++ b/bin/waagent2.0 -@@ -1,4 +1,4 @@ --#!/usr/bin/env python -+#!/usr/bin/env python3 - # - # Azure Linux Agent - # diff --git a/debian/patches/series b/debian/patches/series index a2f35e0..452e72a 100644 --- a/debian/patches/series +++ b/debian/patches/series @@ -1,4 +1,4 @@ +disable-auto-update.patch cloud-init-default-cfg.patch disable_import_test.patch disable_udev_overrides.patch -fix_shebangs.patch diff --git a/debian/rules b/debian/rules index 8ec7cb1..91d44ae 100755 --- a/debian/rules +++ b/debian/rules @@ -22,3 +22,6 @@ override_dh_installinit: override_dh_systemd_enable: dh_systemd_enable --name walinuxagent walinuxagent.service dh_systemd_enable --name ephemeral-disk-warning ephemeral-disk-warning.service + +override_dh_python3: + dh_python3 -O--buildsystem=pybuild --shebang "/usr/bin/env python3" diff --git a/init/coreos/cloud-config.yml b/init/coreos/cloud-config.yml index 461dc1a..f8efc59 100644 --- a/init/coreos/cloud-config.yml +++ b/init/coreos/cloud-config.yml @@ -47,6 +47,6 @@ coreos: oem: id: azure name: Microsoft Azure - version-id: 2.1.0 + version-id: 2.1.4 home-url: https://azure.microsoft.com/ bug-report-url: https://github.com/coreos/bugs/issues diff --git a/init/freebsd/waagent b/init/freebsd/waagent new file mode 100755 index 0000000..99ddef7 --- /dev/null +++ b/init/freebsd/waagent @@ -0,0 +1,18 @@ +#!/bin/sh + +# PROVIDE: waagent +# REQUIRE: sshd netif dhclient +# KEYWORD: nojail + +. /etc/rc.subr + +PATH=$PATH:/usr/local/bin +name="waagent" +rcvar="waagent_enable" +pidfile="/var/run/waagent.pid" +command="/usr/local/sbin/${name}" +command_interpreter="python" +command_args="start" + +load_rc_config $name +run_rc_command "$1" diff --git a/init/ubuntu/walinuxagent.service b/init/ubuntu/walinuxagent.service index 681435e..ae8dccb 100755 --- a/init/ubuntu/walinuxagent.service +++ b/init/ubuntu/walinuxagent.service @@ -1,3 +1,9 @@ +# +# NOTE: +# This file hosted on WALinuxAgent repository only for reference purposes. +# Please refer to a recent image to find out the up-to-date systemd unit file. 
+# + [Unit] Description=Azure Linux Agent @@ -9,7 +15,7 @@ ConditionPathExists=/etc/waagent.conf [Service] Type=simple -ExecStart=/usr/bin/python3 /usr/sbin/waagent -daemon +ExecStart=/usr/bin/python3 -u /usr/sbin/waagent -daemon Restart=always [Install] diff --git a/init/waagent.service b/init/waagent.service index f465f79..e91f143 100755..100644 --- a/init/waagent.service +++ b/init/waagent.service @@ -1,14 +1,16 @@ [Unit] Description=Azure Linux Agent -After=network.target -After=sshd.service +Wants=network-online.target sshd.service sshd-keygen.service +After=network-online.target + ConditionFileIsExecutable=/usr/sbin/waagent ConditionPathExists=/etc/waagent.conf [Service] Type=simple -ExecStart=/usr/sbin/waagent -daemon +ExecStart=/usr/bin/python -u /usr/sbin/waagent -daemon Restart=always +RestartSec=5 [Install] WantedBy=multi-user.target diff --git a/makepkg.py b/makepkg.py new file mode 100755 index 0000000..ca7ad20 --- /dev/null +++ b/makepkg.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python + +import glob +import os +import os.path +import shutil +import subprocess +import sys + +from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, AGENT_LONG_VERSION +from azurelinuxagent.ga.update import AGENT_MANIFEST_FILE + + +MANIFEST = '''[{{ + "name": "{0}", + "version": 1.0, + "handlerManifest": {{ + "installCommand": "", + "uninstallCommand": "", + "updateCommand": "", + "enableCommand": "python -u {1} -run-exthandlers", + "disableCommand": "", + "rebootAfterInstall": false, + "reportHeartbeat": false + }} +}}]''' + + +output_path = os.path.join(os.getcwd(), "eggs") +target_path = os.path.join(output_path, AGENT_LONG_VERSION) +bin_path = os.path.join(target_path, "bin") +egg_path = os.path.join(bin_path, AGENT_LONG_VERSION + ".egg") +manifest_path = os.path.join(target_path, AGENT_MANIFEST_FILE) +pkg_name = os.path.join(output_path, AGENT_LONG_VERSION + ".zip") + +def do(*args): + try: + subprocess.check_output(args, stderr=subprocess.STDOUT) + except subprocess.CalledProcessError as e: + print "ERROR: {0}".format(str(e)) + print "\t{0}".format(" ".join(args)) + print e.output + sys.exit(1) + +if os.path.isdir(target_path): + shutil.rmtree(target_path) +elif os.path.isfile(target_path): + os.remove(target_path) +if os.path.isfile(pkg_name): + os.remove(pkg_name) +os.makedirs(bin_path) +print "Created {0} directory".format(target_path) + +args = ["python", "setup.py"] +args.append("bdist_egg") +args.append("--dist-dir={0}".format(bin_path)) + +print "Creating egg {0}".format(egg_path) +do(*args) + +egg_name = os.path.join("bin", os.path.basename(glob.glob(os.path.join(bin_path, "*"))[0])) + +print "Writing {0}".format(manifest_path) +with open(manifest_path, mode='w') as manifest: + manifest.write(MANIFEST.format(AGENT_NAME, egg_name)) + +cwd = os.getcwd() +os.chdir(target_path) +print "Creating package {0}".format(pkg_name) +do("zip", "-r", pkg_name, egg_name) +do("zip", "-j", pkg_name, AGENT_MANIFEST_FILE) +os.chdir(cwd) + +print "Package {0} successfully created".format(pkg_name) +sys.exit(0) @@ -18,11 +18,11 @@ # import os -from azurelinuxagent.metadata import AGENT_NAME, AGENT_VERSION, \ +from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, \ AGENT_DESCRIPTION, \ DISTRO_NAME, DISTRO_VERSION, DISTRO_FULL_NAME -from azurelinuxagent.agent import Agent +from azurelinuxagent.common.osutil import get_osutil import setuptools from setuptools import find_packages from setuptools.command.install import install as _install @@ -51,6 +51,13 @@ def 
set_systemd_files(data_files, dest="/lib/systemd/system", src=["init/waagent.service"]): data_files.append((dest, src)) +def set_rc_files(data_files, dest="/etc/rc.d/", src=["init/freebsd/waagent"]): + data_files.append((dest, src)) + +def set_udev_files(data_files, dest="/etc/udev/rules.d/", src=["config/66-azure-storage.rules", "config/99-azure-product-uuid.rules"]): + data_files.append((dest, src)) + + def get_data_files(name, version, fullname): """ Determine data_files according to distro name, version and init system type @@ -61,6 +68,7 @@ def get_data_files(name, version, fullname): set_bin_files(data_files) set_conf_files(data_files) set_logrotate_files(data_files) + set_udev_files(data_files) if version.startswith("6"): set_sysv_files(data_files) else: @@ -75,12 +83,14 @@ def get_data_files(name, version, fullname): set_conf_files(data_files, dest="/usr/share/oem", src=["config/coreos/waagent.conf"]) set_logrotate_files(data_files) + set_udev_files(data_files) set_files(data_files, dest="/usr/share/oem", - src="init/coreos/cloud-config.yml") + src=["init/coreos/cloud-config.yml"]) elif name == 'ubuntu': set_bin_files(data_files) set_conf_files(data_files, src=["config/ubuntu/waagent.conf"]) set_logrotate_files(data_files) + set_udev_files(data_files, src=["config/99-azure-product-uuid.rules"]) if version.startswith("12") or version.startswith("14"): #Ubuntu12.04/14.04 - uses upstart set_files(data_files, dest="/etc/init", @@ -98,6 +108,7 @@ def get_data_files(name, version, fullname): set_bin_files(data_files) set_conf_files(data_files, src=["config/suse/waagent.conf"]) set_logrotate_files(data_files) + set_udev_files(data_files) if fullname == 'SUSE Linux Enterprise Server' and \ version.startswith('11') or \ fullname == 'openSUSE' and version.startswith('13.1'): @@ -106,11 +117,16 @@ def get_data_files(name, version, fullname): else: #sles 12+ and openSUSE 13.2+ use systemd set_systemd_files(data_files, dest='/usr/lib/systemd/system') + elif name == 'freebsd': + set_bin_files(data_files, dest="/usr/local/sbin") + set_conf_files(data_files, src=["config/freebsd/waagent.conf"]) + set_rc_files(data_files) else: #Use default setting set_bin_files(data_files) set_conf_files(data_files) set_logrotate_files(data_files) + set_udev_files(data_files) set_sysv_files(data_files) return data_files @@ -150,17 +166,20 @@ class install(_install): def run(self): _install.run(self) if self.register_service: - Agent(False).register_service() - -setuptools.setup(name=AGENT_NAME, - version=AGENT_VERSION, - long_description=AGENT_DESCRIPTION, - author= 'Yue Zhang, Stephen Zarkos, Eric Gable', - author_email = 'walinuxagent@microsoft.com', - platforms = 'Linux', - url='https://github.com/Azure/WALinuxAgent', - license = 'Apache License Version 2.0', - packages=find_packages(exclude=["tests"]), - cmdclass = { - 'install': install - }) + get_osutil().register_agent_service() + +setuptools.setup( + name=AGENT_NAME, + version=AGENT_VERSION, + long_description=AGENT_DESCRIPTION, + author= 'Yue Zhang, Stephen Zarkos, Eric Gable', + author_email = 'walinuxagent@microsoft.com', + platforms = 'Linux', + url='https://github.com/Azure/WALinuxAgent', + license = 'Apache License Version 2.0', + packages=find_packages(exclude=["tests"]), + py_modules=["__main__"], + cmdclass = { + 'install': install + } +) diff --git a/snappy/waagent.conf b/snappy/waagent.conf index 74dccb8..3862cad 100644 --- a/snappy/waagent.conf +++ b/snappy/waagent.conf @@ -68,3 +68,6 @@ OS.OpensslPath=None # Detect Scvmm environment, default 
is n # DetectScvmmEnv=n + +# Enable RDMA management and set up, should only be used in HPC images +# OS.EnableRDMA=y diff --git a/tests/__init__.py b/tests/__init__.py index 9bdb27e..2ef4c16 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -14,6 +14,3 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx diff --git a/azurelinuxagent/protocol/__init__.py b/tests/common/__init__.py index 8c1bbdb..2ef4c16 100644 --- a/azurelinuxagent/protocol/__init__.py +++ b/tests/common/__init__.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/azurelinuxagent/distro/coreos/__init__.py b/tests/common/dhcp/__init__.py index 8c1bbdb..2ef4c16 100644 --- a/azurelinuxagent/distro/coreos/__init__.py +++ b/tests/common/dhcp/__init__.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/tests/common/dhcp/test_dhcp.py b/tests/common/dhcp/test_dhcp.py new file mode 100644 index 0000000..c27ffe8 --- /dev/null +++ b/tests/common/dhcp/test_dhcp.py @@ -0,0 +1,107 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
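
The DHCP handler tests introduced below hard-code the Azure wireserver address in two encodings: as the little-endian hex gateway field of a mocked /proc/net/route table (10813FA8), and, in the dhcp.leases fixtures further down, as the colon-separated hex octets of DHCP option 245 (a8:3f:81:10). A minimal sketch of decoding both forms, for orientation only; this is not the agent's own parsing code:

```
import socket
import struct

def route_hex_to_ip(hex_addr):
    # /proc/net/route stores addresses as host-order (little-endian) hex.
    return socket.inet_ntoa(struct.pack("<I", int(hex_addr, 16)))

def option245_to_ip(option_value):
    # The dhclient lease fixtures render option 245 as colon-separated hex octets.
    return ".".join(str(int(octet, 16)) for octet in option_value.split(":"))

print(route_hex_to_ip("10813FA8"))     # 168.63.129.16, the wireserver gateway in the mocked table
print(option245_to_ip("a8:3f:81:10"))  # 168.63.129.16, the endpoint the lease tests expect
```
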
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import mock +import azurelinuxagent.common.dhcp as dhcp +import azurelinuxagent.common.osutil.default as osutil +from tests.tools import * + + +class TestDHCP(AgentTestCase): + def test_wireserver_route_exists(self): + # setup + dhcp_handler = dhcp.get_dhcp_handler() + self.assertTrue(dhcp_handler.endpoint is None) + self.assertTrue(dhcp_handler.routes is None) + self.assertTrue(dhcp_handler.gateway is None) + + # execute + routing_table = "\ + Iface Destination Gateway Flags RefCnt Use Metric " \ + "Mask MTU Window IRTT \n\ + eth0 00000000 10813FA8 0003 0 0 5 " \ + "00000000 0 0 0 \n\ + eth0 00345B0A 00000000 0001 0 0 5 " \ + "00000000 0 0 0 \n\ + lo 00000000 01345B0A 0003 0 0 1 " \ + "00FCFFFF 0 0 0 \n" + + with patch("os.path.exists", return_value=True): + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + self.assertTrue(dhcp_handler.wireserver_route_exists) + + # test + self.assertTrue(dhcp_handler.endpoint is not None) + self.assertTrue(dhcp_handler.routes is None) + self.assertTrue(dhcp_handler.gateway is None) + + def test_wireserver_route_not_exists(self): + # setup + dhcp_handler = dhcp.get_dhcp_handler() + self.assertTrue(dhcp_handler.endpoint is None) + self.assertTrue(dhcp_handler.routes is None) + self.assertTrue(dhcp_handler.gateway is None) + + # execute + self.assertFalse(dhcp_handler.wireserver_route_exists) + + # test + self.assertTrue(dhcp_handler.endpoint is None) + self.assertTrue(dhcp_handler.routes is None) + self.assertTrue(dhcp_handler.gateway is None) + + def test_dhcp_cache_exists(self): + dhcp_handler = dhcp.get_dhcp_handler() + dhcp_handler.osutil = osutil.DefaultOSUtil() + with patch.object(osutil.DefaultOSUtil, 'get_dhcp_lease_endpoint', + return_value=None): + self.assertFalse(dhcp_handler.dhcp_cache_exists) + self.assertEqual(dhcp_handler.endpoint, None) + with patch.object(osutil.DefaultOSUtil, 'get_dhcp_lease_endpoint', + return_value="foo"): + self.assertTrue(dhcp_handler.dhcp_cache_exists) + self.assertEqual(dhcp_handler.endpoint, "foo") + + def test_dhcp_skip_cache(self): + handler = dhcp.get_dhcp_handler() + handler.osutil = osutil.DefaultOSUtil() + with patch('os.path.exists', return_value=False): + with patch.object(osutil.DefaultOSUtil, 'get_dhcp_lease_endpoint')\ + as patch_dhcp_cache: + with patch.object(dhcp.DhcpHandler, 'send_dhcp_req') \ + as patch_dhcp_send: + + endpoint = 'foo' + patch_dhcp_cache.return_value = endpoint + + # endpoint comes from cache + self.assertFalse(handler.skip_cache) + handler.run() + self.assertTrue(patch_dhcp_cache.call_count == 1) + self.assertTrue(patch_dhcp_send.call_count == 0) + self.assertTrue(handler.endpoint == endpoint) + + # reset + handler.skip_cache = True + handler.endpoint = None + + # endpoint comes from dhcp request + self.assertTrue(handler.skip_cache) + handler.run() + self.assertTrue(patch_dhcp_cache.call_count == 1) + self.assertTrue(patch_dhcp_send.call_count == 1) diff --git a/tests/common/osutil/__init__.py b/tests/common/osutil/__init__.py new file mode 100644 index 0000000..2ef4c16 --- /dev/null +++ b/tests/common/osutil/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
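
The DefaultOSUtil routing-table tests that follow treat a default route (destination 00000000) whose flags word has the gateway bit set as belonging to the primary interface, and prefer the entry with the lowest metric when several default routes exist. A rough sketch of the flag check, assuming the standard Linux RTF_UP (0x1) and RTF_GATEWAY (0x2) bit values; the agent's actual is_primary_interface logic lives in azurelinuxagent.common.osutil.default and may differ in detail:

```
RTF_UP = 0x0001
RTF_GATEWAY = 0x0002

def is_default_gateway_route(destination_hex, flags_hex):
    # e.g. the 'flgs' row in the tests: destination 00000000, flags 0003 -> True
    flags = int(flags_hex, 16)
    return (int(destination_hex, 16) == 0
            and flags & RTF_UP
            and flags & RTF_GATEWAY)

print(bool(is_default_gateway_route("00000000", "0003")))  # True
print(bool(is_default_gateway_route("00000000", "0001")))  # False: no gateway flag
```
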
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# diff --git a/tests/common/osutil/test_default.py b/tests/common/osutil/test_default.py new file mode 100644 index 0000000..d9d00f6 --- /dev/null +++ b/tests/common/osutil/test_default.py @@ -0,0 +1,146 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import socket +import glob +import mock +import azurelinuxagent.common.osutil.default as osutil +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.osutil import get_osutil +from tests.tools import * + + +class TestOSUtil(AgentTestCase): + def test_restart(self): + # setup + retries = 3 + ifname = 'dummy' + with patch.object(shellutil, "run") as run_patch: + run_patch.return_value = 1 + + # execute + osutil.DefaultOSUtil.restart_if(osutil.DefaultOSUtil(), ifname=ifname, retries=retries, wait=0) + + # assert + self.assertEqual(run_patch.call_count, retries) + self.assertEqual(run_patch.call_args_list[0][0][0], 'ifdown {0} && ifup {0}'.format(ifname)) + + def test_get_first_if(self): + ifname, ipaddr = osutil.DefaultOSUtil().get_first_if() + self.assertTrue(ifname.startswith('eth')) + self.assertTrue(ipaddr is not None) + try: + socket.inet_aton(ipaddr) + except socket.error: + self.fail("not a valid ip address") + + def test_isloopback(self): + self.assertTrue(osutil.DefaultOSUtil().is_loopback(b'lo')) + self.assertFalse(osutil.DefaultOSUtil().is_loopback(b'eth0')) + + def test_isprimary(self): + routing_table = "\ + Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ + eth0 00000000 01345B0A 0003 0 0 5 00000000 0 0 0 \n\ + eth0 00345B0A 00000000 0001 0 0 5 00000000 0 0 0 \n\ + lo 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('lo')) + self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('eth0')) + + def test_multiple_default_routes(self): + routing_table = "\ + Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ + high 00000000 01345B0A 0003 0 0 5 00000000 0 0 0 \n\ + low1 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('low1')) + + def test_multiple_interfaces(self): + routing_table = "\ + Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ + first 00000000 01345B0A 0003 0 0 1 00000000 0 0 0 \n\ + secnd 
00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('first')) + + def test_interface_flags(self): + routing_table = "\ + Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ + nflg 00000000 01345B0A 0001 0 0 1 00000000 0 0 0 \n\ + flgs 00000000 01345B0A 0003 0 0 1 00FCFFFF 0 0 0 \n" + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + self.assertTrue(osutil.DefaultOSUtil().is_primary_interface('flgs')) + + def test_no_interface(self): + routing_table = "\ + Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT \n\ + ndst 00000001 01345B0A 0003 0 0 1 00000000 0 0 0 \n\ + nflg 00000000 01345B0A 0001 0 0 1 00FCFFFF 0 0 0 \n" + + mo = mock.mock_open(read_data=routing_table) + with patch(open_patch(), mo): + self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('ndst')) + self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('nflg')) + self.assertFalse(osutil.DefaultOSUtil().is_primary_interface('invalid')) + + def test_no_primary_does_not_throw(self): + with patch.object(osutil.DefaultOSUtil, 'get_primary_interface') \ + as patch_primary: + exception = False + patch_primary.return_value = '' + try: + osutil.DefaultOSUtil().get_first_if()[0] + except Exception as e: + exception = True + self.assertFalse(exception) + + def test_dhcp_lease_default(self): + self.assertTrue(osutil.DefaultOSUtil().get_dhcp_lease_endpoint() is None) + + def test_dhcp_lease_ubuntu(self): + with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): + with patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases"))): + endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() + self.assertTrue(endpoint is not None) + self.assertEqual(endpoint, "168.63.129.16") + + endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() + self.assertTrue(endpoint is not None) + self.assertEqual(endpoint, "168.63.129.16") + + endpoint = get_osutil(distro_name='ubuntu', distro_version='14.04').get_dhcp_lease_endpoint() + self.assertTrue(endpoint is not None) + self.assertEqual(endpoint, "168.63.129.16") + + def test_dhcp_lease_multi(self): + with patch.object(glob, "glob", return_value=['/var/lib/dhcp/dhclient.eth0.leases']): + with patch(open_patch(), mock.mock_open(read_data=load_data("dhcp.leases.multi"))): + endpoint = get_osutil(distro_name='ubuntu', distro_version='12.04').get_dhcp_lease_endpoint() + self.assertTrue(endpoint is not None) + self.assertEqual(endpoint, "second") + +if __name__ == '__main__': + unittest.main() diff --git a/tests/common/test_version.py b/tests/common/test_version.py new file mode 100644 index 0000000..6a4dc38 --- /dev/null +++ b/tests/common/test_version.py @@ -0,0 +1,65 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
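
TestCurrentAgentName below relies on the convention that an installed agent lives in a directory named AGENT_NAME-version (for example WALinuxAgent-1.2.3) and that set_current_agent derives the running version from the current working directory, falling back to the packaged AGENT_VERSION when the path does not match. A small sketch of that name-to-version extraction; the regex and the fallback value here are illustrative stand-ins, not the real AGENT_NAME_PATTERN:

```
import os
import re

# Hypothetical equivalent of AGENT_NAME_PATTERN from azurelinuxagent.common.version.
AGENT_DIR_RE = re.compile(r"^WALinuxAgent-(.+)$")

def version_from_cwd(cwd, fallback="2.1.5"):
    match = AGENT_DIR_RE.match(os.path.basename(cwd))
    return match.group(1) if match else fallback

print(version_from_cwd("/var/lib/waagent/WALinuxAgent-1.2.3"))  # 1.2.3
print(version_from_cwd("/default/install/directory"))           # falls back to the packaged version
```
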
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from __future__ import print_function + +import copy +import glob +import json +import os +import platform +import random +import subprocess +import sys +import tempfile +import zipfile + +from tests.protocol.mockwiredata import * +from tests.tools import * + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil + +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion +from azurelinuxagent.common.version import * + + +class TestCurrentAgentName(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + return + + @patch("os.getcwd", return_value="/default/install/directory") + def test_extract_name_finds_installed(self, mock_cwd): + current_agent, current_version = set_current_agent() + self.assertEqual(AGENT_LONG_VERSION, current_agent) + self.assertEqual(AGENT_VERSION, str(current_version)) + return + + @patch("os.getcwd") + def test_extract_name_finds_latest_agent(self, mock_cwd): + path = os.path.join(conf.get_lib_dir(), "{0}-{1}".format( + AGENT_NAME, + "1.2.3")) + mock_cwd.return_value = path + agent = os.path.basename(path) + version = AGENT_NAME_PATTERN.match(agent).group(1) + current_agent, current_version = set_current_agent() + self.assertEqual(agent, current_agent) + self.assertEqual(version, str(current_version)) + return diff --git a/tests/daemon/__init__.py b/tests/daemon/__init__.py new file mode 100644 index 0000000..2ef4c16 --- /dev/null +++ b/tests/daemon/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
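
The relocated daemon tests that follow pin down two behaviours: run() restarts the daemon function after a 15 second sleep when it returns, and check_pid() records a pid file, exiting with status 0 when a previous instance already holds it. A bare-bones sketch of that supervision loop, with hypothetical names and a simplified pid check; it is not the agent's actual implementation:

```
import os
import sys
import time

PID_FILE = "/var/run/waagent.pid"  # illustrative path

def check_pid():
    # Exit quietly if a previous daemon already recorded its pid.
    if os.path.isfile(PID_FILE):
        sys.exit(0)
    with open(PID_FILE, "w") as pid_file:
        pid_file.write(str(os.getpid()))

def run(daemon, keep_running=lambda: True):
    check_pid()
    while keep_running():
        daemon()        # returns only if the daemon exits unexpectedly
        time.sleep(15)  # matches mock_sleep.assert_any_call(15) in the test
```
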
+# +# Requires Python 2.4+ and Openssl 1.0+ +# diff --git a/tests/distro/test_daemon.py b/tests/daemon/test_daemon.py index 9d3c45b..263af49 100644 --- a/tests/distro/test_daemon.py +++ b/tests/daemon/test_daemon.py @@ -14,14 +14,10 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * -from azurelinuxagent.distro.loader import get_distro -from azurelinuxagent.exception import * -from azurelinuxagent.distro.default.daemon import * +from azurelinuxagent.common.exception import * +from azurelinuxagent.daemon import * class MockDaemonCall(object): def __init__(self, daemon_handler, count): @@ -38,26 +34,30 @@ class MockDaemonCall(object): @patch("time.sleep") class TestDaemon(AgentTestCase): def test_daemon_restart(self, mock_sleep): - distro = get_distro() - mock_daemon = Mock(side_effect=MockDaemonCall(distro.daemon_handler, 2)) - distro.daemon_handler.daemon = mock_daemon - distro.daemon_handler.check_pid = Mock() - distro.daemon_handler.run() + #Mock daemon function + daemon_handler = get_daemon_handler() + mock_daemon = Mock(side_effect=MockDaemonCall(daemon_handler, 2)) + daemon_handler.daemon = mock_daemon + + daemon_handler.check_pid = Mock() + + daemon_handler.run() mock_sleep.assert_any_call(15) - self.assertEquals(2, distro.daemon_handler.daemon.call_count) + self.assertEquals(2, daemon_handler.daemon.call_count) - @patch("azurelinuxagent.distro.default.daemon.conf") - @patch("azurelinuxagent.distro.default.daemon.sys.exit") + @patch("azurelinuxagent.daemon.main.conf") + @patch("azurelinuxagent.daemon.main.sys.exit") def test_check_pid(self, mock_exit, mock_conf, mock_sleep): - distro = get_distro() + daemon_handler = get_daemon_handler() + mock_pid_file = os.path.join(self.tmp_dir, "pid") mock_conf.get_agent_pid_file_path = Mock(return_value=mock_pid_file) - distro.daemon_handler.check_pid() + daemon_handler.check_pid() self.assertTrue(os.path.isfile(mock_pid_file)) - distro.daemon_handler.check_pid() + daemon_handler.check_pid() mock_exit.assert_any_call(0) if __name__ == '__main__': diff --git a/tests/data/dhcp.leases b/tests/data/dhcp.leases new file mode 100644 index 0000000..cb4f396 --- /dev/null +++ b/tests/data/dhcp.leases @@ -0,0 +1,56 @@ +lease { + interface "eth0"; + fixed-address 10.0.1.4; + server-name "RDE41D2D9BB18C"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.1.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers invalid; + option dhcp-renewal-time 4294967295; + option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; + option unknown-245 a8:3f:81:10; + option dhcp-rebinding-time 4294967295; + option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; + renew 0 2152/07/23 23:27:10; + rebind 0 2152/07/23 23:27:10; + expire 0 never; +} +lease { + interface "eth0"; + fixed-address 10.0.1.4; + server-name "RDE41D2D9BB18C"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.1.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers expired; + option dhcp-renewal-time 4294967295; + option unknown-245 a8:3f:81:10; + option dhcp-rebinding-time 4294967295; + option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; + 
renew 4 2015/06/16 16:58:54; + rebind 4 2015/06/16 16:58:54; + expire 4 2015/06/16 16:58:54; +} +lease { + interface "eth0"; + fixed-address 10.0.1.4; + server-name "RDE41D2D9BB18C"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.1.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers 168.63.129.16; + option dhcp-renewal-time 4294967295; + option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; + option unknown-245 a8:3f:81:10; + option dhcp-rebinding-time 4294967295; + option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; + renew 0 2152/07/23 23:27:10; + rebind 0 2152/07/23 23:27:10; + expire 0 2152/07/23 23:27:10; +} diff --git a/tests/data/dhcp.leases.multi b/tests/data/dhcp.leases.multi new file mode 100644 index 0000000..cfe9c67 --- /dev/null +++ b/tests/data/dhcp.leases.multi @@ -0,0 +1,57 @@ +lease { + interface "eth0"; + fixed-address 10.0.1.4; + server-name "RDE41D2D9BB18C"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.1.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers first; + option dhcp-renewal-time 4294967295; + option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; + option unknown-245 a8:3f:81:10; + option dhcp-rebinding-time 4294967295; + option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; + renew 0 2152/07/23 23:27:10; + rebind 0 2152/07/23 23:27:10; + expire 0 2152/07/23 23:27:10; +} +lease { + interface "eth0"; + fixed-address 10.0.1.4; + server-name "RDE41D2D9BB18C"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.1.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers second; + option dhcp-renewal-time 4294967295; + option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; + option unknown-245 a8:3f:81:10; + option dhcp-rebinding-time 4294967295; + option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; + renew 0 2152/07/23 23:27:10; + rebind 0 2152/07/23 23:27:10; + expire 0 2152/07/23 23:27:10; +} +lease { + interface "eth0"; + fixed-address 10.0.1.4; + server-name "RDE41D2D9BB18C"; + option subnet-mask 255.255.255.0; + option dhcp-lease-time 4294967295; + option routers 10.0.1.1; + option dhcp-message-type 5; + option dhcp-server-identifier 168.63.129.16; + option domain-name-servers expired; + option dhcp-renewal-time 4294967295; + option rfc3442-classless-static-routes 0,10,0,1,1,32,168,63,129,16,10,0,1,1; + option unknown-245 a8:3f:81:10; + option dhcp-rebinding-time 4294967295; + option domain-name "qylsde3bnlhu5dstzf3bav5inc.fx.internal.cloudapp.net"; + renew 0 2152/07/23 23:27:10; + rebind 0 2152/07/23 23:27:10; + expire 0 2012/07/23 23:27:10; +} diff --git a/tests/data/ext/sample_ext/sample.py b/tests/data/ext/sample_ext/sample.py index 7107ac2..74bd839 100755 --- a/tests/data/ext/sample_ext/sample.py +++ b/tests/data/ext/sample_ext/sample.py @@ -2,18 +2,19 @@ import os + def get_seq(): - latest_seq = -1; + latest_seq = -1 config_dir = os.path.join(os.getcwd(), "config") if os.path.isdir(config_dir): for item in os.listdir(config_dir): item_path = os.path.join(config_dir, item) if os.path.isfile(item_path): - seperator = item.rfind(".") - if seperator > 0 and item[seperator + 1:] == "settings": - seq = int(item[0: seperator]) - if 
seq > latest_seq: - latest_seq = seq + separator = item.rfind(".") + if separator > 0 and item[separator + 1:] == "settings": + sequence = int(item[0: separator]) + if sequence > latest_seq: + latest_seq = sequence return latest_seq @@ -28,6 +29,9 @@ succeed_status = """ if __name__ == "__main__": seq = get_seq() if seq >= 0: - status_file = os.path.join(os.getcwd(), "status", "{0}.status".format(seq)) + status_path = os.path.join(os.getcwd(), "status") + if not os.path.exists(status_path): + os.makedirs(status_path) + status_file = os.path.join(status_path, "{0}.status".format(seq)) with open(status_file, "w+") as status: status.write(succeed_status) diff --git a/tests/data/ga/WALinuxAgent-2.1.5.zip b/tests/data/ga/WALinuxAgent-2.1.5.zip Binary files differnew file mode 100644 index 0000000..b1f2684 --- /dev/null +++ b/tests/data/ga/WALinuxAgent-2.1.5.zip diff --git a/tests/data/wire/ext_conf.xml b/tests/data/wire/ext_conf.xml index 725271d..0b7c528 100644 --- a/tests/data/wire/ext_conf.xml +++ b/tests/data/wire/ext_conf.xml @@ -1,44 +1,24 @@ <Extensions version="1.0.0.0" goalStateIncarnation="9"><GuestAgentExtension xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> <GAFamilies> <GAFamily> - <Name>Win8</Name> + <Name>Prod</Name> <Uris> - <Uri>http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> - <Uri>http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> + <Uri>http://manifest_of_ga.xml</Uri> </Uris> </GAFamily> <GAFamily> - <Name>Win7</Name> + <Name>Test</Name> <Uris> - <Uri>http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> - 
<Uri>http://rdfepirv2hknprdstr05.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr06.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr07.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr08.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr09.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr10.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr11.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> - <Uri>http://rdfepirv2hknprdstr12.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> - <Uri>http://zrdfepirv2hk2prdstr01.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> - </Uris> + <Uri>http://manifest_of_ga.xml</Uri> + </Uris> </GAFamily> </GAFamilies> </GuestAgentExtension> <Plugins> - <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.0" location="http://rdfepirv2hknprdstr03.blob.core.windows.net/b01058962be54ceca550a390fa5ff064/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml" config="" state="enabled" autoUpgrade="false" failoverlocation="http://rdfepirv2hknprdstr04.blob.core.windows.net/b01058962be54ceca550a390fa5ff064/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml" runAsStartupTask="false" isJson="true" /> + <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.0.0" location="http://rdfepirv2hknprdstr03.blob.core.windows.net/b01058962be54ceca550a390fa5ff064/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml" config="" state="enabled" autoUpgrade="false" failoverlocation="http://rdfepirv2hknprdstr04.blob.core.windows.net/b01058962be54ceca550a390fa5ff064/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml" runAsStartupTask="false" isJson="true" /> </Plugins> <PluginSettings> - <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.0"> + <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.0.0"> <RuntimeSettings seqNo="0">{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}</RuntimeSettings> </Plugin> </PluginSettings> diff --git a/tests/data/wire/ext_conf_autoupgrade.xml b/tests/data/wire/ext_conf_autoupgrade.xml new file mode 100644 index 0000000..1d6919e --- /dev/null +++ b/tests/data/wire/ext_conf_autoupgrade.xml @@ -0,0 +1,28 @@ +<Extensions version="1.0.0.0" goalStateIncarnation="9"><GuestAgentExtension xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> + <GAFamilies> + <GAFamily> + <Name>Win8</Name> + <Uris> + <Uri>http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> + 
<Uri>http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> + </Uris> + </GAFamily> + <GAFamily> + <Name>Win7</Name> + <Uris> + <Uri>http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> + <Uri>http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> + </Uris> + </GAFamily> + </GAFamilies> +</GuestAgentExtension> +<Plugins> + <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.0.0" location="http://rdfepirv2hknprdstr03.blob.core.windows.net/b01058962be54ceca550a390fa5ff064/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml" config="" state="enabled" autoUpgrade="true" failoverlocation="http://rdfepirv2hknprdstr04.blob.core.windows.net/b01058962be54ceca550a390fa5ff064/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml" runAsStartupTask="false" isJson="true" /> +</Plugins> +<PluginSettings> + <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.0.0"> + <RuntimeSettings seqNo="0">{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}</RuntimeSettings> + </Plugin> +</PluginSettings> +<StatusUploadBlob>https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D</StatusUploadBlob></Extensions> + diff --git a/tests/data/wire/ext_conf_autoupgrade_internalversion.xml b/tests/data/wire/ext_conf_autoupgrade_internalversion.xml new file mode 100644 index 0000000..1e613ea --- /dev/null +++ b/tests/data/wire/ext_conf_autoupgrade_internalversion.xml @@ -0,0 +1,28 @@ +<Extensions version="1.0.0.0" goalStateIncarnation="9"><GuestAgentExtension xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> + <GAFamilies> + <GAFamily> + <Name>Win8</Name> + <Uris> + <Uri>http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> + <Uri>http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> + </Uris> + </GAFamily> + <GAFamily> + <Name>Win7</Name> + <Uris> + <Uri>http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> + <Uri>http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> + </Uris> + </GAFamily> + </GAFamilies> +</GuestAgentExtension> +<Plugins> + <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.2.0" location="http://rdfepirv2hknprdstr03.blob.core.windows.net/b01058962be54ceca550a390fa5ff064/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml" config="" state="enabled" autoUpgrade="true" failoverlocation="http://rdfepirv2hknprdstr04.blob.core.windows.net/b01058962be54ceca550a390fa5ff064/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml" runAsStartupTask="false" isJson="true" /> +</Plugins> +<PluginSettings> + <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.2.0"> + <RuntimeSettings 
seqNo="0">{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}</RuntimeSettings> + </Plugin> +</PluginSettings> +<StatusUploadBlob>https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D</StatusUploadBlob></Extensions> + diff --git a/tests/data/wire/ext_conf_internalversion.xml b/tests/data/wire/ext_conf_internalversion.xml new file mode 100644 index 0000000..1e613ea --- /dev/null +++ b/tests/data/wire/ext_conf_internalversion.xml @@ -0,0 +1,28 @@ +<Extensions version="1.0.0.0" goalStateIncarnation="9"><GuestAgentExtension xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> + <GAFamilies> + <GAFamily> + <Name>Win8</Name> + <Uris> + <Uri>http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> + <Uri>http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win8_asiaeast_manifest.xml</Uri> + </Uris> + </GAFamily> + <GAFamily> + <Name>Win7</Name> + <Uris> + <Uri>http://rdfepirv2hknprdstr03.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> + <Uri>http://rdfepirv2hknprdstr04.blob.core.windows.net/bfd5c281a7dc4e4b84381eb0b47e3aaf/Microsoft.WindowsAzure.GuestAgent_Win7_asiaeast_manifest.xml</Uri> + </Uris> + </GAFamily> + </GAFamilies> +</GuestAgentExtension> +<Plugins> + <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.2.0" location="http://rdfepirv2hknprdstr03.blob.core.windows.net/b01058962be54ceca550a390fa5ff064/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml" config="" state="enabled" autoUpgrade="true" failoverlocation="http://rdfepirv2hknprdstr04.blob.core.windows.net/b01058962be54ceca550a390fa5ff064/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml" runAsStartupTask="false" isJson="true" /> +</Plugins> +<PluginSettings> + <Plugin name="OSTCExtensions.ExampleHandlerLinux" version="1.2.0"> + <RuntimeSettings seqNo="0">{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"4037FBF5F1F3014F99B5D6C7799E9B20E6871CB3","protectedSettings":"MIICWgYJK","publicSettings":{"foo":"bar"}}}]}</RuntimeSettings> + </Plugin> +</PluginSettings> +<StatusUploadBlob>https://yuezhatest.blob.core.windows.net/vhds/test-cs12.test-cs12.test-cs12.status?sr=b&sp=rw&se=9999-01-01&sk=key1&sv=2014-02-14&sig=hfRh7gzUE7sUtYwke78IOlZOrTRCYvkec4hGZ9zZzXo%3D</StatusUploadBlob></Extensions> + diff --git a/tests/data/wire/ga_manifest.xml b/tests/data/wire/ga_manifest.xml new file mode 100644 index 0000000..f43daf5 --- /dev/null +++ b/tests/data/wire/ga_manifest.xml @@ -0,0 +1,48 @@ +<?xml version="1.0" encoding="utf-8"?> +<PluginVersionManifest xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> + <Plugins> + <Plugin> + <Version>1.0.0</Version> + <Uris> + <Uri>http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.0.0</Uri> + </Uris> + </Plugin> + <Plugin> + <Version>1.1.0</Version> + <Uris> + <Uri>http://foo.bar/zar/OSTCExtensions.WALinuxAgent__1.1.0</Uri> + </Uris> + </Plugin> + <Plugin> + <Version>2.0.0</Version><Uris><Uri>http://host/OSTCExtensions.WALinuxAgent__2.0.0</Uri></Uris> + </Plugin> + <Plugin> + 
<Version>2.1.0</Version><Uris><Uri>http://host/OSTCExtensions.WALinuxAgent__2.1.0</Uri></Uris> + </Plugin> + <Plugin> + <Version>2.1.1</Version><Uris><Uri>http://host/OSTCExtensions.WALinuxAgent__2.1.1</Uri></Uris> + </Plugin> + <Plugin> + <Version>2.2.0</Version><Uris><Uri>http://host/OSTCExtensions.WALinuxAgent__2.2.0</Uri></Uris> + </Plugin> + <Plugin> + <Version>3.0</Version><Uris><Uri>http://host/OSTCExtensions.WALinuxAgent__3.0</Uri></Uris> + </Plugin> + <Plugin> + <Version>3.1</Version><Uris><Uri>http://host/OSTCExtensions.WALinuxAgent__3.1</Uri></Uris> + </Plugin> + <Plugin> + <Version>4.0.0.0</Version><Uris><Uri>http://host/OSTCExtensions.WALinuxAgent__3.0</Uri></Uris> + </Plugin> + <Plugin> + <Version>4.0.0.1</Version><Uris><Uri>http://host/OSTCExtensions.WALinuxAgent__3.1</Uri></Uris> + </Plugin> + <Plugin> + <Version>4.1.0.0</Version><Uris><Uri>http://host/OSTCExtensions.WALinuxAgent__3.1</Uri></Uris> + </Plugin> + <Plugin> + <Version>99999.0.0.0</Version><Uris><Uri>http://host/OSTCExtensions.WALinuxAgent__99999.0.0.0</Uri></Uris> + </Plugin> + </Plugins> +</PluginVersionManifest> + diff --git a/tests/data/wire/manifest.xml b/tests/data/wire/manifest.xml index 943755a..ff42b9d 100644 --- a/tests/data/wire/manifest.xml +++ b/tests/data/wire/manifest.xml @@ -2,17 +2,59 @@ <PluginVersionManifest xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> <Plugins> <Plugin> - <Version>1.0</Version> + <Version>1.0.0</Version> <Uris> - <Uri>http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux</Uri> + <Uri>http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.0.0</Uri> </Uris> </Plugin> <Plugin> - <Version>1.1</Version> + <Version>1.1.0</Version> <Uris> - <Uri>http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux</Uri> + <Uri>http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.1.0</Uri> </Uris> </Plugin> + <Plugin> + <Version>2.0.0</Version><Uris><Uri>http://host/OSTCExtensions.ExampleHandlerLinux__2.0.0</Uri></Uris> + </Plugin> + <Plugin> + <Version>2.1.0</Version><Uris><Uri>http://host/OSTCExtensions.ExampleHandlerLinux__2.1.0</Uri></Uris> + <DisallowMajorVersionUpgrade>True</DisallowMajorVersionUpgrade> + </Plugin> + <Plugin> + <Version>2.1.1</Version><Uris><Uri>http://host/OSTCExtensions.ExampleHandlerLinux__2.1.1</Uri></Uris> + </Plugin> + <Plugin> + <Version>2.2.0</Version><Uris><Uri>http://host/OSTCExtensions.ExampleHandlerLinux__2.2.0</Uri></Uris> + </Plugin> + <Plugin> + <Version>3.0</Version><Uris><Uri>http://host/OSTCExtensions.ExampleHandlerLinux__3.0</Uri></Uris> + </Plugin> + <Plugin> + <Version>3.1</Version><Uris><Uri>http://host/OSTCExtensions.ExampleHandlerLinux__3.1</Uri></Uris> + </Plugin> + <Plugin> + <Version>4.0.0.0</Version><Uris><Uri>http://host/OSTCExtensions.ExampleHandlerLinux__3.0</Uri></Uris> + </Plugin> + <Plugin> + <Version>4.0.0.1</Version><Uris><Uri>http://host/OSTCExtensions.ExampleHandlerLinux__3.1</Uri></Uris> + </Plugin> + <Plugin> + <Version>4.1.0.0</Version><Uris><Uri>http://host/OSTCExtensions.ExampleHandlerLinux__3.1</Uri></Uris> + </Plugin> </Plugins> + <InternalPlugins> + <Plugin> + <Version>1.2.0</Version> + <Uris> + <Uri>http://foo.bar/zar/OSTCExtensions.ExampleHandlerLinux__1.2.0</Uri> + </Uris> + </Plugin> + <Plugin> + <Version>2.3.0</Version><Uris><Uri>http://host/OSTCExtensions.ExampleHandlerLinux__2.3.0</Uri></Uris> + </Plugin> + <Plugin> + <Version>2.4.0</Version><Uris><Uri>http://host/OSTCExtensions.ExampleHandlerLinux__2.3.0</Uri></Uris> + </Plugin> + </InternalPlugins> </PluginVersionManifest> diff --git 
a/tests/distro/__init__.py b/tests/distro/__init__.py index 9bdb27e..2ef4c16 100644 --- a/tests/distro/__init__.py +++ b/tests/distro/__init__.py @@ -14,6 +14,3 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx diff --git a/tests/distro/test_loader.py b/tests/distro/test_resourceDisk.py index 94ca913..198fd49 100644 --- a/tests/distro/test_loader.py +++ b/tests/distro/test_resourceDisk.py @@ -19,18 +19,24 @@ # http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * -from azurelinuxagent.distro.loader import get_distro -from azurelinuxagent.distro.default.distro import DefaultDistro +from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler -class TestDistroLoader(AgentTestCase): +class TestResourceDisk(AgentTestCase): + def test_mkfile(self): + # setup + test_file = os.path.join(self.tmp_dir, 'test_file') + file_size = 1024 * 128 + if os.path.exists(test_file): + os.remove(test_file) - @distros() - def test_distro_loader(self, *distro_args): - distro = get_distro(*distro_args) - self.assertNotEquals(None, distro) - self.assertNotEquals(DefaultDistro, type(distro)) - + # execute + get_resourcedisk_handler().mkfile(test_file, file_size) + + # assert + assert os.path.exists(test_file) + + # cleanup + os.remove(test_file) if __name__ == '__main__': unittest.main() - diff --git a/tests/distro/test_scvmm.py b/tests/distro/test_scvmm.py new file mode 100644 index 0000000..2437170 --- /dev/null +++ b/tests/distro/test_scvmm.py @@ -0,0 +1,84 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# +# Implements parts of RFC 2131, 1541, 1497 and +# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx +# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx + +import mock +from tests.tools import * + +import azurelinuxagent.daemon.scvmm as scvmm +from azurelinuxagent.daemon.main import * +from azurelinuxagent.common.osutil.default import DefaultOSUtil + +class TestSCVMM(AgentTestCase): + def test_scvmm_detection_with_file(self): + # setup + conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) + conf.get_detect_scvmm_env = Mock(return_value=True) + scvmm_file = os.path.join(self.tmp_dir, scvmm.VMM_CONF_FILE_NAME) + fileutil.write_file(scvmm_file, "") + + with patch.object(scvmm.ScvmmHandler, 'start_scvmm_agent') as po: + with patch('os.listdir', return_value=["sr0", "sr1", "sr2"]): + with patch('time.sleep', return_value=0): + # execute + failed = False + try: + scvmm.get_scvmm_handler().run() + except: + failed = True + # assert + self.assertTrue(failed) + self.assertTrue(po.call_count == 1) + # cleanup + os.remove(scvmm_file) + + + def test_scvmm_detection_with_multiple_cdroms(self): + # setup + conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) + conf.get_detect_scvmm_env = Mock(return_value=True) + + # execute + with mock.patch.object(DefaultOSUtil, 'mount_dvd') as patch_mount: + with patch('os.listdir', return_value=["sr0", "sr1", "sr2"]): + scvmm.ScvmmHandler().detect_scvmm_env() + # assert + assert patch_mount.call_count == 3 + assert patch_mount.call_args_list[0][1]['dvd_device'] == '/dev/sr0' + assert patch_mount.call_args_list[1][1]['dvd_device'] == '/dev/sr1' + assert patch_mount.call_args_list[2][1]['dvd_device'] == '/dev/sr2' + + + def test_scvmm_detection_without_file(self): + # setup + conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) + conf.get_detect_scvmm_env = Mock(return_value=True) + scvmm_file = os.path.join(self.tmp_dir, scvmm.VMM_CONF_FILE_NAME) + if os.path.exists(scvmm_file): + os.remove(scvmm_file) + + with mock.patch.object(scvmm.ScvmmHandler, 'start_scvmm_agent') as patch_start: + # execute + scvmm.ScvmmHandler().detect_scvmm_env() + # assert + patch_start.assert_not_called() + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/ga/__init__.py b/tests/ga/__init__.py new file mode 100644 index 0000000..2ef4c16 --- /dev/null +++ b/tests/ga/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
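
The SCVMM detection tests above encode the expected probe behaviour: with DetectScvmmEnv enabled, the handler mounts each candidate CD-ROM device (/dev/sr0, /dev/sr1, ...) at the configured DVD mount point, looks for the VMM configuration file, and starts the SCVMM agent instead of the normal flow when it is found. A condensed sketch of that probe loop with hypothetical helper names; the configuration file name is an assumption, as the real value comes from the scvmm module:

```
import os

VMM_CONF_FILE_NAME = "linuxosconfiguration.xml"  # assumption; the real constant lives in azurelinuxagent.daemon.scvmm

def detect_scvmm_env(osutil, mount_point, start_scvmm_agent):
    # Try every sr* CD-ROM device until the VMM config file shows up.
    for device in ["sr0", "sr1", "sr2"]:  # the tests mock os.listdir to return this set
        osutil.mount_dvd(dvd_device="/dev/{0}".format(device))
        if os.path.isfile(os.path.join(mount_point, VMM_CONF_FILE_NAME)):
            start_scvmm_agent()
            return True
    return False
```
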
+# +# Requires Python 2.4+ and Openssl 1.0+ +# diff --git a/tests/distro/test_extension.py b/tests/ga/test_extension.py index d0b631f..71c219c 100644 --- a/tests/distro/test_extension.py +++ b/tests/ga/test_extension.py @@ -14,20 +14,15 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx -from tests.tools import * from tests.protocol.mockwiredata import * -from azurelinuxagent.exception import * -from azurelinuxagent.distro.loader import get_distro -from azurelinuxagent.protocol.restapi import get_properties -from azurelinuxagent.protocol.wire import WireProtocol - -@patch("time.sleep") -@patch("azurelinuxagent.protocol.wire.CryptUtil") -@patch("azurelinuxagent.utils.restutil.http_get") +from azurelinuxagent.common.exception import * +from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.ga.exthandlers import * +from azurelinuxagent.common.protocol.wire import WireProtocol + +@patch("azurelinuxagent.common.protocol.wire.CryptUtil") +@patch("azurelinuxagent.common.utils.restutil.http_get") class TestExtension(AgentTestCase): def _assert_handler_status(self, report_vm_status, expected_status, @@ -42,17 +37,19 @@ class TestExtension(AgentTestCase): handler_status.name) self.assertEquals(version, handler_status.version) self.assertEquals(expected_ext_count, len(handler_status.extensions)) + return def _assert_no_handler_status(self, report_vm_status): self.assertTrue(report_vm_status.called) args, kw = report_vm_status.call_args vm_status = args[0] self.assertEquals(0, len(vm_status.vmAgent.extensionHandlers)) + return + + def _create_mock(self, test_data, mock_http_get, MockCryptUtil): + """Test enable/disable/uninstall of an extension""" + handler = get_exthandlers_handler() - def _create_mock(self, test_data, mock_http_get, MockCryptUtil, _): - """Test enable/disable/unistall of an extension""" - distro = get_distro() - #Mock protocol to return test data mock_http_get.side_effect = test_data.mock_http_get MockCryptUtil.side_effect = test_data.mock_crypt_util @@ -61,104 +58,104 @@ class TestExtension(AgentTestCase): protocol.detect() protocol.report_ext_status = MagicMock() protocol.report_vm_status = MagicMock() - distro.protocol_util.get_protocol = Mock(return_value=protocol) - - return distro, protocol + + handler.protocol_util.get_protocol = Mock(return_value=protocol) + return handler, protocol def test_ext_handler(self, *args): test_data = WireProtocolData(DATA_FILE) - distro, protocol = self._create_mock(test_data, *args) + exthandlers_handler, protocol = self._create_mock(test_data, *args) #Test enable scenario. 
- distro.ext_handlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 0) #Test goal state not changed - distro.ext_handlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") #Test goal state changed test_data.goal_state = test_data.goal_state.replace("<Incarnation>1<", "<Incarnation>2<") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"0\"", "seqNo=\"1\"") - distro.ext_handlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "success", 1) #Test upgrade test_data.goal_state = test_data.goal_state.replace("<Incarnation>2<", "<Incarnation>3<") - test_data.ext_conf = test_data.ext_conf.replace("1.0", "1.1") + test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") test_data.ext_conf = test_data.ext_conf.replace("seqNo=\"1\"", "seqNo=\"2\"") - distro.ext_handlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.1.0") self._assert_ext_status(protocol.report_ext_status, "success", 2) #Test disable test_data.goal_state = test_data.goal_state.replace("<Incarnation>3<", "<Incarnation>4<") test_data.ext_conf = test_data.ext_conf.replace("enabled", "disabled") - distro.ext_handlers_handler.run() + exthandlers_handler.run() self._assert_handler_status(protocol.report_vm_status, "NotReady", - 1, "1.1") + 1, "1.1.0") #Test uninstall test_data.goal_state = test_data.goal_state.replace("<Incarnation>4<", "<Incarnation>5<") test_data.ext_conf = test_data.ext_conf.replace("disabled", "uninstall") - distro.ext_handlers_handler.run() + exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) #Test uninstall again! 
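
Further down in this file, test_ext_handler_version_decide_autoupgrade_internalversion and test_ext_handler_version_decide_between_minor_versions pin down how decide_version picks a handler version from the manifest: without auto-upgrade the highest available version within the configured major.minor wins, with auto-upgrade the highest within the same major wins, and internal versions are only considered when explicitly requested. A much-simplified illustration of the public-version cases, using LooseVersion in place of the agent's FlexibleVersion and ignoring internal versions and DisallowMajorVersionUpgrade:

```
from distutils.version import LooseVersion

def resolve(requested, available, autoupgrade=False):
    req = LooseVersion(requested).version
    if autoupgrade:
        candidates = [v for v in available if LooseVersion(v).version[0] == req[0]]
    else:
        candidates = [v for v in available if LooseVersion(v).version[:2] == req[:2]]
    return max(candidates, key=LooseVersion) if candidates else requested

public = ["2.0.0", "2.1.0", "2.1.1", "2.2.0", "3.0", "3.1",
          "4.0.0.0", "4.0.0.1", "4.1.0.0"]
print(resolve("2.1.0", public))                    # 2.1.1
print(resolve("2.0",   public, autoupgrade=True))  # 2.2.0
print(resolve("4.0",   public, autoupgrade=True))  # 4.1.0.0
```
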
test_data.goal_state = test_data.goal_state.replace("<Incarnation>5<", "<Incarnation>6<") - distro.ext_handlers_handler.run() + exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) def test_ext_handler_no_settings(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_NO_SETTINGS) - distro, protocol = self._create_mock(test_data, *args) + exthandlers_handler, protocol = self._create_mock(test_data, *args) - distro.ext_handlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 0, "1.0") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 0, "1.0.0") def test_ext_handler_no_public_settings(self, *args): test_data = WireProtocolData(DATA_FILE_EXT_NO_PUBLIC) - distro, protocol = self._create_mock(test_data, *args) + exthandlers_handler, protocol = self._create_mock(test_data, *args) - distro.ext_handlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") def test_ext_handler_no_ext(self, *args): test_data = WireProtocolData(DATA_FILE_NO_EXT) - distro, protocol = self._create_mock(test_data, *args) + exthandlers_handler, protocol = self._create_mock(test_data, *args) #Assert no extension handler status - distro.ext_handlers_handler.run() + exthandlers_handler.run() self._assert_no_handler_status(protocol.report_vm_status) - @patch('azurelinuxagent.distro.default.extension.add_event') + @patch('azurelinuxagent.ga.exthandlers.add_event') def test_ext_handler_download_failure(self, mock_add_event, *args): test_data = WireProtocolData(DATA_FILE) - distro, protocol = self._create_mock(test_data, *args) + exthandlers_handler, protocol = self._create_mock(test_data, *args) protocol.download_ext_handler_pkg = Mock(side_effect=ProtocolError) - distro.ext_handlers_handler.run() + exthandlers_handler.run() args, kw = mock_add_event.call_args self.assertEquals(False, kw['is_success']) self.assertEquals("OSTCExtensions.ExampleHandlerLinux", kw['name']) self.assertEquals("Download", kw['op']) - @patch('azurelinuxagent.distro.default.extension.fileutil') + @patch('azurelinuxagent.ga.exthandlers.fileutil') def test_ext_handler_io_error(self, mock_fileutil, *args): test_data = WireProtocolData(DATA_FILE) - distro, protocol = self._create_mock(test_data, *args) + exthandlers_handler, protocol = self._create_mock(test_data, *args) mock_fileutil.write_file.return_value = IOError("Mock IO Error") - distro.ext_handlers_handler.run() + exthandlers_handler.run() def _assert_ext_status(self, report_ext_status, expected_status, expected_seq_no): @@ -170,21 +167,96 @@ class TestExtension(AgentTestCase): def test_ext_handler_no_reporting_status(self, *args): test_data = WireProtocolData(DATA_FILE) - distro, protocol = self._create_mock(test_data, *args) - distro.ext_handlers_handler.run() - self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0") + exthandlers_handler, protocol = self._create_mock(test_data, *args) + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") #Remove status file and re-run collecting extension status status_file = os.path.join(self.tmp_dir, - "OSTCExtensions.ExampleHandlerLinux-1.0", + "OSTCExtensions.ExampleHandlerLinux-1.0.0", "status", "0.status") self.assertTrue(os.path.isfile(status_file)) os.remove(status_file) - distro.ext_handlers_handler.run() - 
self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0") + exthandlers_handler.run() + self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0") self._assert_ext_status(protocol.report_ext_status, "error", 0) + def test_ext_handler_version_decide_autoupgrade_internalversion(self, *args): + for internal in [False, True]: + for autoupgrade in [False, True]: + if internal: + config_version = '1.2.0' + decision_version = '1.2.0' + if autoupgrade: + datafile = DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION + else: + datafile = DATA_FILE_EXT_INTERNALVERSION + else: + config_version = '1.0.0' + if autoupgrade: + datafile = DATA_FILE_EXT_AUTOUPGRADE + decision_version = '1.1.0' + else: + datafile = DATA_FILE + decision_version = '1.0.0' + + _, protocol = self._create_mock(WireProtocolData(datafile), *args) + ext_handlers, _ = protocol.get_ext_handlers() + self.assertEqual(1, len(ext_handlers.extHandlers)) + ext_handler = ext_handlers.extHandlers[0] + self.assertEqual('OSTCExtensions.ExampleHandlerLinux', ext_handler.name) + self.assertEqual(config_version, ext_handler.properties.version, "config version.") + ExtHandlerInstance(ext_handler, protocol).decide_version() + self.assertEqual(decision_version, ext_handler.properties.version, "decision version.") + + def test_ext_handler_version_decide_between_minor_versions(self, *args): + """ + Using v2.x~v4.x for unit testing + Available versions via manifest XML (I stands for internal): + 2.0.0, 2.1.0, 2.1.1, 2.2.0, 2.3.0(I), 2.4.0(I), 3.0, 3.1, 4.0.0.0, 4.0.0.1, 4.1.0.0 + See tests/data/wire/manifest.xml for possible versions + """ + + # (installed_version, config_version, exptected_version, autoupgrade_expected_version) + cases = [ + (None, '2.0', '2.0.0', '2.2.0'), + (None, '2.0.0', '2.0.0', '2.2.0'), + ('1.0', '1.0.0', '1.0.0', '1.1.0'), + (None, '2.1.0', '2.1.1', '2.2.0'), + (None, '2.2.0', '2.2.0', '2.2.0'), + (None, '2.3.0', '2.3.0', '2.3.0'), + (None, '2.4.0', '2.4.0', '2.4.0'), + (None, '3.0', '3.0', '3.1'), + (None, '4.0', '4.0.0.1', '4.1.0.0'), + ] + + _, protocol = self._create_mock(WireProtocolData(DATA_FILE), *args) + version_uri = Mock() + version_uri.uri = 'http://some/Microsoft.OSTCExtensions_ExampleHandlerLinux_asiaeast_manifest.xml' + + for (installed_version, config_version, expected_version, autoupgrade_expected_version) in cases: + ext_handler = Mock() + ext_handler.properties = Mock() + ext_handler.name = 'OSTCExtensions.ExampleHandlerLinux' + ext_handler.versionUris = [version_uri] + ext_handler.properties.version = config_version + + ext_handler_instance = ExtHandlerInstance(ext_handler, protocol) + ext_handler_instance.get_installed_version = Mock(return_value=installed_version) + + ext_handler_instance.decide_version() + self.assertEqual(expected_version, ext_handler.properties.version) + + ext_handler.properties.version = config_version + ext_handler.properties.upgradePolicy = 'auto' + + ext_handler_instance = ExtHandlerInstance(ext_handler, protocol) + ext_handler_instance.get_installed_version = Mock(return_value=installed_version) + + ext_handler_instance.decide_version() + self.assertEqual(autoupgrade_expected_version, ext_handler.properties.version) + if __name__ == '__main__': unittest.main() diff --git a/tests/distro/test_monitor.py b/tests/ga/test_monitor.py index 1dd7740..838d037 100644 --- a/tests/distro/test_monitor.py +++ b/tests/ga/test_monitor.py @@ -14,13 +14,10 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# 
http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * -from azurelinuxagent.exception import * -from azurelinuxagent.distro.default.monitor import * +from azurelinuxagent.common.exception import * +from azurelinuxagent.ga.monitor import * class TestMonitor(AgentTestCase): def test_parse_xml_event(self): diff --git a/tests/ga/test_update.py b/tests/ga/test_update.py new file mode 100644 index 0000000..74804fb --- /dev/null +++ b/tests/ga/test_update.py @@ -0,0 +1,1135 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from __future__ import print_function + +import copy +import glob +import json +import os +import platform +import random +import subprocess +import sys +import tempfile +import zipfile + +from tests.protocol.mockwiredata import * +from tests.tools import * + +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil + +from azurelinuxagent.common.exception import UpdateError +from azurelinuxagent.common.protocol.restapi import * +from azurelinuxagent.common.protocol.wire import * +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion +from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION +from azurelinuxagent.ga.update import * + +NO_ERROR = { + "last_failure" : 0.0, + "failure_count" : 0, + "was_fatal" : False +} + +WITH_ERROR = { + "last_failure" : 42.42, + "failure_count" : 2, + "was_fatal" : False +} + +EMPTY_MANIFEST = { + "name": "WALinuxAgent", + "version": 1.0, + "handlerManifest": { + "installCommand": "", + "uninstallCommand": "", + "updateCommand": "", + "enableCommand": "", + "disableCommand": "", + "rebootAfterInstall": False, + "reportHeartbeat": False + } +} + + +def get_agent_pkgs(in_dir=os.path.join(data_dir, "ga")): + path = os.path.join(in_dir, AGENT_PKG_GLOB) + return glob.glob(path) + + +def get_agents(in_dir=os.path.join(data_dir, "ga")): + path = os.path.join(in_dir, AGENT_DIR_GLOB) + return [a for a in glob.glob(path) if os.path.isdir(a)] + + +def get_agent_file_path(): + return get_agent_pkgs()[0] + + +def get_agent_file_name(): + return os.path.basename(get_agent_file_path()) + + +def get_agent_path(): + return fileutil.trim_ext(get_agent_file_path(), "zip") + + +def get_agent_name(): + return os.path.basename(get_agent_path()) + + +def get_agent_version(): + return FlexibleVersion(get_agent_name().split("-")[1]) + + +def faux_logger(): + print("STDOUT message") + print("STDERR message", file=sys.stderr) + return DEFAULT + + +class UpdateTestCase(AgentTestCase): + def setUp(self): + AgentTestCase.setUp(self) + return + + def agent_bin(self, version): + return "bin/{0}-{1}.egg".format(AGENT_NAME, version) + + def agent_count(self): + return len(self.agent_dirs()) + + def agent_dirs(self): + return get_agents(in_dir=self.tmp_dir) + + def agent_dir(self, version): + return 
os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, version)) + + def agent_paths(self): + paths = glob.glob(os.path.join(self.tmp_dir, "*")) + paths.sort() + return paths + + def agent_pkgs(self): + return get_agent_pkgs(in_dir=self.tmp_dir) + + def agent_versions(self): + v = [FlexibleVersion(AGENT_DIR_PATTERN.match(a).group(1)) for a in self.agent_dirs()] + v.sort(reverse=True) + return v + + def get_error_file(self, error_data=NO_ERROR): + fp = tempfile.NamedTemporaryFile(mode="w") + json.dump(error_data if error_data is not None else NO_ERROR, fp) + fp.seek(0) + return fp + + def create_error(self, error_data=NO_ERROR): + with self.get_error_file(error_data) as path: + return GuestAgentError(path.name) + + def copy_agents(self, *agents): + if len(agents) <= 0: + agents = get_agent_pkgs() + for agent in agents: + fileutil.copy_file(agent, to_dir=self.tmp_dir) + return + + def expand_agents(self): + for agent in self.agent_pkgs(): + zipfile.ZipFile(agent).extractall(os.path.join( + self.tmp_dir, + fileutil.trim_ext(agent, "zip"))) + return + + def prepare_agents(self, base_version=AGENT_VERSION, count=5, is_available=True): + base_v = FlexibleVersion(base_version) + + # Ensure the test data is copied over + agent_count = self.agent_count() + if agent_count <= 0: + self.copy_agents(get_agent_pkgs()[0]) + self.expand_agents() + count -= 1 + + # Determine the most recent agent version + versions = self.agent_versions() + src_v = FlexibleVersion(str(versions[0])) + + # If the most recent agent is newer the minimum requested, use the agent version + if base_v < src_v: + base_v = src_v + + # Create agent packages and directories + return self.replicate_agents( + src_v=src_v, + count=count-agent_count, + is_available=is_available) + + def remove_agents(self): + for agent in self.agent_paths(): + try: + if os.path.isfile(agent): + os.remove(agent) + else: + shutil.rmtree(agent) + except: + pass + return + + def replicate_agents( + self, + count=5, + src_v=AGENT_VERSION, + is_available=True, + increment=1): + from_path = self.agent_dir(src_v) + dst_v = FlexibleVersion(str(src_v)) + for i in range(0,count): + dst_v += increment + to_path = self.agent_dir(dst_v) + shutil.copyfile(from_path + ".zip", to_path + ".zip") + shutil.copytree(from_path, to_path) + shutil.move( + os.path.join(to_path, self.agent_bin(src_v)), + os.path.join(to_path, self.agent_bin(dst_v))) + if not is_available: + GuestAgent(to_path).mark_failure(is_fatal=True) + + return dst_v + + +class TestGuestAgentError(UpdateTestCase): + def test_creation(self): + self.assertRaises(TypeError, GuestAgentError) + self.assertRaises(UpdateError, GuestAgentError, None) + + with self.get_error_file(error_data=WITH_ERROR) as path: + err = GuestAgentError(path.name) + self.assertEqual(path.name, err.path) + self.assertNotEqual(None, err) + + self.assertEqual(WITH_ERROR["last_failure"], err.last_failure) + self.assertEqual(WITH_ERROR["failure_count"], err.failure_count) + self.assertEqual(WITH_ERROR["was_fatal"], err.was_fatal) + return + + def test_clear(self): + with self.get_error_file(error_data=WITH_ERROR) as path: + err = GuestAgentError(path.name) + self.assertEqual(path.name, err.path) + self.assertNotEqual(None, err) + + err.clear() + self.assertEqual(NO_ERROR["last_failure"], err.last_failure) + self.assertEqual(NO_ERROR["failure_count"], err.failure_count) + self.assertEqual(NO_ERROR["was_fatal"], err.was_fatal) + return + + def test_load_preserves_error_state(self): + with self.get_error_file(error_data=WITH_ERROR) as path: + 
err = GuestAgentError(path.name) + self.assertEqual(path.name, err.path) + self.assertNotEqual(None, err) + + with self.get_error_file(error_data=NO_ERROR): + err.load() + self.assertEqual(WITH_ERROR["last_failure"], err.last_failure) + self.assertEqual(WITH_ERROR["failure_count"], err.failure_count) + self.assertEqual(WITH_ERROR["was_fatal"], err.was_fatal) + return + + def test_save(self): + err1 = self.create_error() + err1.mark_failure() + err1.mark_failure(is_fatal=True) + + err2 = self.create_error(err1.to_json()) + self.assertEqual(err1.last_failure, err2.last_failure) + self.assertEqual(err1.failure_count, err2.failure_count) + self.assertEqual(err1.was_fatal, err2.was_fatal) + + def test_mark_failure(self): + err = self.create_error() + self.assertFalse(err.is_blacklisted) + + for i in range(0, MAX_FAILURE): + err.mark_failure() + + # Agent failed >= MAX_FAILURE, it should be blacklisted + self.assertTrue(err.is_blacklisted) + self.assertEqual(MAX_FAILURE, err.failure_count) + + # Clear old failure does not clear recent failure + err.clear_old_failure() + self.assertTrue(err.is_blacklisted) + + # Clear does remove old, outdated failures + err.last_failure -= RETAIN_INTERVAL * 2 + err.clear_old_failure() + self.assertFalse(err.is_blacklisted) + return + + def test_mark_failure_permanent(self): + err = self.create_error() + + self.assertFalse(err.is_blacklisted) + + # Fatal errors immediately blacklist + err.mark_failure(is_fatal=True) + self.assertTrue(err.is_blacklisted) + self.assertTrue(err.failure_count < MAX_FAILURE) + return + + def test_str(self): + err = self.create_error(error_data=NO_ERROR) + s = "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( + NO_ERROR["last_failure"], + NO_ERROR["failure_count"], + NO_ERROR["was_fatal"]) + self.assertEqual(s, str(err)) + + err = self.create_error(error_data=WITH_ERROR) + s = "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( + WITH_ERROR["last_failure"], + WITH_ERROR["failure_count"], + WITH_ERROR["was_fatal"]) + self.assertEqual(s, str(err)) + return + + +class TestGuestAgent(UpdateTestCase): + def setUp(self): + UpdateTestCase.setUp(self) + self.copy_agents(get_agent_file_path()) + self.agent_path = os.path.join(self.tmp_dir, get_agent_name()) + return + + def tearDown(self): + self.remove_agents() + return + + def test_creation(self): + self.assertRaises(UpdateError, GuestAgent, "A very bad file name") + n = "{0}-a.bad.version".format(AGENT_NAME) + self.assertRaises(UpdateError, GuestAgent, n) + + agent = GuestAgent(path=self.agent_path) + self.assertNotEqual(None, agent) + self.assertEqual(get_agent_name(), agent.name) + self.assertEqual(get_agent_version(), agent.version) + + self.assertEqual(self.agent_path, agent.get_agent_dir()) + + path = os.path.join(self.agent_path, AGENT_MANIFEST_FILE) + self.assertEqual(path, agent.get_agent_manifest_path()) + + self.assertEqual( + os.path.join(self.agent_path, AGENT_ERROR_FILE), + agent.get_agent_error_file()) + + path = ".".join((os.path.join(conf.get_lib_dir(), get_agent_name()), "zip")) + self.assertEqual(path, agent.get_agent_pkg_path()) + + self.assertTrue(agent.is_downloaded) + # Note: Agent will get blacklisted since the package for this test is invalid + self.assertTrue(agent.is_blacklisted) + self.assertFalse(agent.is_available) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + def test_clear_error(self, mock_ensure): + agent = GuestAgent(path=self.agent_path) + agent.mark_failure(is_fatal=True) + + 
self.assertTrue(agent.error.last_failure > 0.0) + self.assertEqual(1, agent.error.failure_count) + self.assertTrue(agent.is_blacklisted) + self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) + + agent.clear_error() + self.assertEqual(0.0, agent.error.last_failure) + self.assertEqual(0, agent.error.failure_count) + self.assertFalse(agent.is_blacklisted) + self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + def test_is_available(self, mock_ensure): + agent = GuestAgent(path=self.agent_path) + + self.assertFalse(agent.is_available) + agent._unpack() + self.assertTrue(agent.is_available) + + agent.mark_failure(is_fatal=True) + self.assertFalse(agent.is_available) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + def test_is_blacklisted(self, mock_ensure): + agent = GuestAgent(path=self.agent_path) + self.assertFalse(agent.is_blacklisted) + + agent._unpack() + self.assertFalse(agent.is_blacklisted) + self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) + + agent.mark_failure(is_fatal=True) + self.assertTrue(agent.is_blacklisted) + self.assertEqual(agent.is_blacklisted, agent.error.is_blacklisted) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + def test_is_downloaded(self, mock_ensure): + agent = GuestAgent(path=self.agent_path) + self.assertFalse(agent.is_downloaded) + agent._unpack() + self.assertTrue(agent.is_downloaded) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + def test_mark_failure(self, mock_ensure): + agent = GuestAgent(path=self.agent_path) + agent.mark_failure() + self.assertEqual(1, agent.error.failure_count) + + agent.mark_failure(is_fatal=True) + self.assertEqual(2, agent.error.failure_count) + self.assertTrue(agent.is_blacklisted) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + def test_unpack(self, mock_ensure): + agent = GuestAgent(path=self.agent_path) + self.assertFalse(os.path.isdir(agent.get_agent_dir())) + agent._unpack() + self.assertTrue(os.path.isdir(agent.get_agent_dir())) + self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + def test_unpack_fail(self, mock_ensure): + agent = GuestAgent(path=self.agent_path) + self.assertFalse(os.path.isdir(agent.get_agent_dir())) + os.remove(agent.get_agent_pkg_path()) + self.assertRaises(UpdateError, agent._unpack) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + def test_load_manifest(self, mock_ensure): + agent = GuestAgent(path=self.agent_path) + agent._unpack() + agent._load_manifest() + self.assertEqual(agent.manifest.get_enable_command(), agent.get_agent_cmd()) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + def test_load_manifest_missing(self, mock_ensure): + agent = GuestAgent(path=self.agent_path) + self.assertFalse(os.path.isdir(agent.get_agent_dir())) + agent._unpack() + os.remove(agent.get_agent_manifest_path()) + self.assertRaises(UpdateError, agent._load_manifest) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + def test_load_manifest_is_empty(self, mock_ensure): + agent = GuestAgent(path=self.agent_path) + self.assertFalse(os.path.isdir(agent.get_agent_dir())) + agent._unpack() + self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) + + with 
open(agent.get_agent_manifest_path(), "w") as file: + json.dump(EMPTY_MANIFEST, file) + self.assertRaises(UpdateError, agent._load_manifest) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + def test_load_manifest_is_malformed(self, mock_ensure): + agent = GuestAgent(path=self.agent_path) + self.assertFalse(os.path.isdir(agent.get_agent_dir())) + agent._unpack() + self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) + + with open(agent.get_agent_manifest_path(), "w") as file: + file.write("This is not JSON data") + self.assertRaises(UpdateError, agent._load_manifest) + return + + def test_load_error(self): + agent = GuestAgent(path=self.agent_path) + agent.error = None + + agent._load_error() + self.assertTrue(agent.error is not None) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + @patch("azurelinuxagent.ga.update.restutil.http_get") + def test_download(self, mock_http_get, mock_ensure): + self.remove_agents() + self.assertFalse(os.path.isdir(self.agent_path)) + + agent_pkg = load_bin_data(os.path.join("ga", get_agent_file_name())) + mock_http_get.return_value= ResponseMock(response=agent_pkg) + + pkg = ExtHandlerPackage(version=str(get_agent_version())) + pkg.uris.append(ExtHandlerPackageUri()) + agent = GuestAgent(pkg=pkg) + agent._download() + + self.assertTrue(os.path.isfile(agent.get_agent_pkg_path())) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._ensure_downloaded") + @patch("azurelinuxagent.ga.update.restutil.http_get") + def test_download_fail(self, mock_http_get, mock_ensure): + self.remove_agents() + self.assertFalse(os.path.isdir(self.agent_path)) + + mock_http_get.return_value= ResponseMock(status=restutil.httpclient.SERVICE_UNAVAILABLE) + + pkg = ExtHandlerPackage(version=str(get_agent_version())) + pkg.uris.append(ExtHandlerPackageUri()) + agent = GuestAgent(pkg=pkg) + + self.assertRaises(UpdateError, agent._download) + self.assertFalse(os.path.isfile(agent.get_agent_pkg_path())) + self.assertFalse(agent.is_downloaded) + return + + @patch("azurelinuxagent.ga.update.restutil.http_get") + def test_ensure_downloaded(self, mock_http_get): + self.remove_agents() + self.assertFalse(os.path.isdir(self.agent_path)) + + agent_pkg = load_bin_data(os.path.join("ga", get_agent_file_name())) + mock_http_get.return_value= ResponseMock(response=agent_pkg) + + pkg = ExtHandlerPackage(version=str(get_agent_version())) + pkg.uris.append(ExtHandlerPackageUri()) + agent = GuestAgent(pkg=pkg) + + self.assertTrue(os.path.isfile(agent.get_agent_manifest_path())) + self.assertTrue(agent.is_downloaded) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._download", side_effect=UpdateError) + def test_ensure_downloaded_download_fails(self, mock_download): + self.remove_agents() + self.assertFalse(os.path.isdir(self.agent_path)) + + pkg = ExtHandlerPackage(version=str(get_agent_version())) + pkg.uris.append(ExtHandlerPackageUri()) + agent = GuestAgent(pkg=pkg) + + self.assertEqual(1, agent.error.failure_count) + self.assertFalse(agent.error.was_fatal) + self.assertFalse(agent.is_blacklisted) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._download") + @patch("azurelinuxagent.ga.update.GuestAgent._unpack", side_effect=UpdateError) + def test_ensure_downloaded_unpack_fails(self, mock_download, mock_unpack): + self.assertFalse(os.path.isdir(self.agent_path)) + + pkg = ExtHandlerPackage(version=str(get_agent_version())) + pkg.uris.append(ExtHandlerPackageUri()) + agent = GuestAgent(pkg=pkg) + + 
self.assertEqual(1, agent.error.failure_count) + self.assertTrue(agent.error.was_fatal) + self.assertTrue(agent.is_blacklisted) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._download") + @patch("azurelinuxagent.ga.update.GuestAgent._unpack") + @patch("azurelinuxagent.ga.update.GuestAgent._load_manifest", side_effect=UpdateError) + def test_ensure_downloaded_load_manifest_fails(self, mock_download, mock_unpack, mock_manifest): + self.assertFalse(os.path.isdir(self.agent_path)) + + pkg = ExtHandlerPackage(version=str(get_agent_version())) + pkg.uris.append(ExtHandlerPackageUri()) + agent = GuestAgent(pkg=pkg) + + self.assertEqual(1, agent.error.failure_count) + self.assertTrue(agent.error.was_fatal) + self.assertTrue(agent.is_blacklisted) + return + + @patch("azurelinuxagent.ga.update.GuestAgent._download") + @patch("azurelinuxagent.ga.update.GuestAgent._unpack") + @patch("azurelinuxagent.ga.update.GuestAgent._load_manifest") + def test_ensure_download_skips_blacklisted(self, mock_download, mock_unpack, mock_manifest): + agent = GuestAgent(path=self.agent_path) + agent.clear_error() + agent.mark_failure(is_fatal=True) + + pkg = ExtHandlerPackage(version=str(get_agent_version())) + pkg.uris.append(ExtHandlerPackageUri()) + agent = GuestAgent(pkg=pkg) + + self.assertEqual(1, agent.error.failure_count) + self.assertTrue(agent.error.was_fatal) + self.assertTrue(agent.is_blacklisted) + self.assertEqual(0, mock_download.call_count) + self.assertEqual(0, mock_unpack.call_count) + self.assertEqual(0, mock_manifest.call_count) + return + + +class TestUpdate(UpdateTestCase): + def setUp(self): + UpdateTestCase.setUp(self) + self.update_handler = get_update_handler() + return + + def test_creation(self): + self.assertTrue(self.update_handler.running) + + self.assertEqual(None, self.update_handler.last_etag) + self.assertEqual(None, self.update_handler.last_attempt_time) + + self.assertEqual(0, len(self.update_handler.agents)) + + self.assertEqual(None, self.update_handler.child_agent) + self.assertEqual(None, self.update_handler.child_launch_time) + self.assertEqual(0, self.update_handler.child_launch_attempts) + self.assertEqual(None, self.update_handler.child_process) + + self.assertEqual(None, self.update_handler.signal_handler) + return + + def _test_ensure_latest_agent( + self, + base_version=FlexibleVersion(AGENT_VERSION), + protocol=None, + versions=None): + + latest_version = self.prepare_agents() + if versions is None or len(versions) <= 0: + versions = [latest_version] + + etag = self.update_handler.last_etag if self.update_handler.last_etag is not None else 42 + if protocol is None: + protocol = ProtocolMock(etag=etag, versions=versions) + self.update_handler.protocol_util = protocol + conf.get_autoupdate_gafamily = Mock(return_value=protocol.family) + + return self.update_handler._ensure_latest_agent(base_version=base_version) + + def test_ensure_latest_agent_returns_true_on_first_use(self): + self.assertEqual(None, self.update_handler.last_etag) + self.assertTrue(self._test_ensure_latest_agent()) + return + + def test_ensure_latest_agent_includes_old_agents(self): + self.prepare_agents() + + old_count = FlexibleVersion(AGENT_VERSION).version[-1] + old_version = self.agent_versions()[-1] + + self.replicate_agents(src_v=old_version, count=old_count, increment=-1) + all_count = len(self.agent_versions()) + + self.assertTrue(self._test_ensure_latest_agent(versions=self.agent_versions())) + self.assertEqual(all_count, len(self.update_handler.agents)) + return + + def 
test_ensure_lastest_agent_purges_old_agents(self): + self.prepare_agents() + agent_count = self.agent_count() + self.assertEqual(5, agent_count) + + agent_versions = self.agent_versions()[:3] + self.assertTrue(self._test_ensure_latest_agent(versions=agent_versions)) + self.assertEqual(len(agent_versions), len(self.update_handler.agents)) + self.assertEqual(agent_versions, self.agent_versions()) + return + + def test_ensure_latest_agent_skips_if_too_frequent(self): + conf.get_autoupdate_frequency = Mock(return_value=10000) + self.update_handler.last_attempt_time = time.time() + self.assertFalse(self._test_ensure_latest_agent()) + return + + def test_ensure_latest_agent_skips_when_etag_matches(self): + self.update_handler.last_etag = 42 + self.assertFalse(self._test_ensure_latest_agent()) + return + + def test_ensure_latest_agent_skips_if_when_no_new_versions(self): + self.prepare_agents() + base_version = self.agent_versions()[0] + 1 + self.assertFalse(self._test_ensure_latest_agent(base_version=base_version)) + return + + def test_ensure_latest_agent_skips_when_no_versions(self): + self.assertFalse(self._test_ensure_latest_agent(protocol=ProtocolMock())) + return + + def test_ensure_latest_agent_skips_when_updates_are_disabled(self): + conf.get_autoupdate_enabled = Mock(return_value=False) + self.assertFalse(self._test_ensure_latest_agent()) + return + + def test_ensure_latest_agent_sorts(self): + self.prepare_agents() + self._test_ensure_latest_agent() + + v = FlexibleVersion("100000") + for a in self.update_handler.agents: + self.assertTrue(v > a.version) + v = a.version + return + + def _test_evaluate_agent_health(self, child_agent_index=0): + self.prepare_agents() + + latest_agent = self.update_handler.get_latest_agent() + self.assertTrue(latest_agent.is_available) + self.assertFalse(latest_agent.is_blacklisted) + self.assertTrue(len(self.update_handler.agents) > 1) + + child_agent = self.update_handler.agents[child_agent_index] + self.assertTrue(child_agent.is_available) + self.assertFalse(child_agent.is_blacklisted) + self.update_handler.child_agent = child_agent + + self.update_handler._evaluate_agent_health(latest_agent) + return + + def test_evaluate_agent_health_ignores_installed_agent(self): + self.update_handler._evaluate_agent_health(None) + return + + def test_evaluate_agent_health_raises_exception_for_restarting_agent(self): + self.update_handler.child_launch_time = time.time() - (4 * 60) + self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 1 + self.assertRaises(Exception, self._test_evaluate_agent_health) + return + + def test_evaluate_agent_health_will_not_raise_exception_for_long_restarts(self): + self.update_handler.child_launch_time = time.time() - 24 * 60 + self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX + self._test_evaluate_agent_health() + return + + def test_evaluate_agent_health_will_not_raise_exception_too_few_restarts(self): + self.update_handler.child_launch_time = time.time() + self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 2 + self._test_evaluate_agent_health() + return + + def test_evaluate_agent_health_resets_with_new_agent(self): + self.update_handler.child_launch_time = time.time() - (4 * 60) + self.update_handler.child_launch_attempts = CHILD_LAUNCH_RESTART_MAX - 1 + self._test_evaluate_agent_health(child_agent_index=1) + self.assertEqual(1, self.update_handler.child_launch_attempts) + return + + def test_filter_blacklisted_agents(self): + self.prepare_agents() + + 
self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()]) + self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents)) + + kept_agents = self.update_handler.agents[1::2] + blacklisted_agents = self.update_handler.agents[::2] + for agent in blacklisted_agents: + agent.mark_failure(is_fatal=True) + self.update_handler._filter_blacklisted_agents() + self.assertEqual(kept_agents, self.update_handler.agents) + return + + def test_get_latest_agent(self): + latest_version = self.prepare_agents() + + latest_agent = self.update_handler.get_latest_agent() + self.assertEqual(len(get_agents(self.tmp_dir)), len(self.update_handler.agents)) + self.assertEqual(latest_version, latest_agent.version) + return + + def test_get_latest_agent_no_updates(self): + self.assertEqual(None, self.update_handler.get_latest_agent()) + return + + def test_get_latest_agent_skip_updates(self): + conf.get_autoupdate_enabled = Mock(return_value=False) + self.assertEqual(None, self.update_handler.get_latest_agent()) + return + + def test_get_latest_agent_skips_unavailable(self): + self.prepare_agents() + prior_agent = self.update_handler.get_latest_agent() + + latest_version = self.prepare_agents(count=self.agent_count()+1, is_available=False) + latest_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, latest_version)) + self.assertFalse(GuestAgent(latest_path).is_available) + + latest_agent = self.update_handler.get_latest_agent() + self.assertTrue(latest_agent.version < latest_version) + self.assertEqual(latest_agent.version, prior_agent.version) + return + + def test_load_agents(self): + self.prepare_agents() + + self.assertTrue(0 <= len(self.update_handler.agents)) + self.update_handler._load_agents() + self.assertEqual(len(get_agents(self.tmp_dir)), len(self.update_handler.agents)) + return + + def test_load_agents_does_not_reload(self): + self.prepare_agents() + + self.update_handler._load_agents() + agents = self.update_handler.agents + + self.update_handler._load_agents() + self.assertEqual(agents, self.update_handler.agents) + return + + def test_load_agents_sorts(self): + self.prepare_agents() + self.update_handler._load_agents() + + v = FlexibleVersion("100000") + for a in self.update_handler.agents: + self.assertTrue(v > a.version) + v = a.version + return + + def test_purge_agents(self): + self.prepare_agents() + self.update_handler._load_agents() + + # Ensure at least three agents initially exist + self.assertTrue(2 < len(self.update_handler.agents)) + + # Purge every other agent + kept_agents = self.update_handler.agents[1::2] + purged_agents = self.update_handler.agents[::2] + + # Reload and assert only the kept agents remain on disk + self.update_handler.agents = kept_agents + self.update_handler._purge_agents() + self.update_handler._load_agents() + self.assertEqual( + [agent.version for agent in kept_agents], + [agent.version for agent in self.update_handler.agents]) + + # Ensure both directories and packages are removed + for agent in purged_agents: + agent_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, agent.version)) + self.assertFalse(os.path.exists(agent_path)) + self.assertFalse(os.path.exists(agent_path + ".zip")) + + # Ensure kept agent directories and packages remain + for agent in kept_agents: + agent_path = os.path.join(self.tmp_dir, "{0}-{1}".format(AGENT_NAME, agent.version)) + self.assertTrue(os.path.exists(agent_path)) + self.assertTrue(os.path.exists(agent_path + ".zip")) + return + + def _test_run_latest(self, 
mock_child=None, mock_time=None): + if mock_child is None: + mock_child = ChildMock() + if mock_time is None: + mock_time = TimeMock() + + with patch('subprocess.Popen', return_value=mock_child) as mock_popen: + with patch('time.time', side_effect=mock_time.time): + with patch('time.sleep', return_value=mock_time.sleep): + self.update_handler.run_latest() + self.assertEqual(1, mock_popen.call_count) + + return mock_popen.call_args + + def test_run_latest(self): + self.prepare_agents() + + agent = self.update_handler.get_latest_agent() + args, kwargs = self._test_run_latest() + cmds = shlex.split(agent.get_agent_cmd()) + if cmds[0].lower() == "python": + cmds[0] = get_python_cmd() + + self.assertEqual(args[0], cmds) + self.assertEqual(True, 'cwd' in kwargs) + self.assertEqual(agent.get_agent_dir(), kwargs['cwd']) + return + + def test_run_latest_polls_and_waits_for_success(self): + mock_child = ChildMock(return_value=None) + mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/3) + self._test_run_latest(mock_child=mock_child, mock_time=mock_time) + self.assertEqual(2, mock_child.poll.call_count) + self.assertEqual(1, mock_child.wait.call_count) + return + + def test_run_latest_polling_stops_at_success(self): + mock_child = ChildMock(return_value=0) + mock_time = TimeMock(time_increment=CHILD_HEALTH_INTERVAL/3) + self._test_run_latest(mock_child=mock_child, mock_time=mock_time) + self.assertEqual(1, mock_child.poll.call_count) + self.assertEqual(0, mock_child.wait.call_count) + return + + def test_run_latest_polling_stops_at_failure(self): + mock_child = ChildMock(return_value=42) + mock_time = TimeMock() + self._test_run_latest(mock_child=mock_child, mock_time=mock_time) + self.assertEqual(1, mock_child.poll.call_count) + self.assertEqual(0, mock_child.wait.call_count) + self.assertEqual(2, mock_time.time_call_count) + return + + def test_run_latest_defaults_to_current(self): + self.assertEqual(None, self.update_handler.get_latest_agent()) + + args, kwargs = self._test_run_latest() + + self.assertEqual(args[0], [get_python_cmd(), "-u", sys.argv[0], "-run-exthandlers"]) + self.assertEqual(True, 'cwd' in kwargs) + self.assertEqual(os.getcwd(), kwargs['cwd']) + return + + def test_run_latest_forwards_output(self): + try: + tempdir = tempfile.mkdtemp() + stdout_path = os.path.join(tempdir, "stdout") + stderr_path = os.path.join(tempdir, "stderr") + + with open(stdout_path, "w") as stdout: + with open(stderr_path, "w") as stderr: + saved_stdout, sys.stdout = sys.stdout, stdout + saved_stderr, sys.stderr = sys.stderr, stderr + try: + self._test_run_latest(mock_child=ChildMock(side_effect=faux_logger)) + finally: + sys.stdout = saved_stdout + sys.stderr = saved_stderr + + with open(stdout_path, "r") as stdout: + self.assertEqual(1, len(stdout.readlines())) + with open(stderr_path, "r") as stderr: + self.assertEqual(1, len(stderr.readlines())) + finally: + shutil.rmtree(tempdir, True) + return + + def test_run_latest_nonzero_code_marks_failures(self): + # logger.add_logger_appender(logger.AppenderType.STDOUT) + self.prepare_agents() + + latest_agent = self.update_handler.get_latest_agent() + self.assertTrue(latest_agent.is_available) + self.assertEqual(0.0, latest_agent.error.last_failure) + self.assertEqual(0, latest_agent.error.failure_count) + + self._test_run_latest(mock_child=ChildMock(return_value=1)) + + self.assertTrue(latest_agent.is_available) + self.assertNotEqual(0.0, latest_agent.error.last_failure) + self.assertEqual(1, latest_agent.error.failure_count) + return + + def 
test_run_latest_exception_blacklists(self): + # logger.add_logger_appender(logger.AppenderType.STDOUT) + self.prepare_agents() + + latest_agent = self.update_handler.get_latest_agent() + self.assertTrue(latest_agent.is_available) + self.assertEqual(0.0, latest_agent.error.last_failure) + self.assertEqual(0, latest_agent.error.failure_count) + + self._test_run_latest(mock_child=ChildMock(side_effect=Exception("Force blacklisting"))) + + self.assertFalse(latest_agent.is_available) + self.assertTrue(latest_agent.error.is_blacklisted) + self.assertNotEqual(0.0, latest_agent.error.last_failure) + self.assertEqual(1, latest_agent.error.failure_count) + return + + @patch('signal.signal') + def test_run_latest_captures_signals(self, mock_signal): + self._test_run_latest() + self.assertEqual(1, mock_signal.call_count) + return + + @patch('signal.signal') + def test_run_latest_creates_only_one_signal_handler(self, mock_signal): + self.update_handler.signal_handler = "Not None" + self._test_run_latest() + self.assertEqual(0, mock_signal.call_count) + return + + def _test_run(self, invocations=1, calls=[call.run()], enable_updates=False): + conf.get_autoupdate_enabled = Mock(return_value=enable_updates) + + # Note: + # - Python only allows mutations of objects to which a function has + # a reference. Incrementing an integer directly changes the + # reference. Incrementing an item of a list changes an item to + # which the code has a reference. + # See http://stackoverflow.com/questions/26408941/python-nested-functions-and-variable-scope + iterations = [0] + def iterator(*args, **kwargs): + iterations[0] += 1 + if iterations[0] >= invocations: + self.update_handler.running = False + return + + calls = calls * invocations + + with patch('azurelinuxagent.ga.exthandlers.get_exthandlers_handler') as mock_handler: + with patch('azurelinuxagent.ga.monitor.get_monitor_handler') as mock_monitor: + with patch('azurelinuxagent.ga.env.get_env_handler') as mock_env: + with patch('time.sleep', side_effect=iterator) as mock_sleep: + with patch('sys.exit') as mock_exit: + + self.update_handler.run() + + self.assertEqual(1, mock_handler.call_count) + self.assertEqual(mock_handler.return_value.method_calls, calls) + self.assertEqual(invocations, mock_sleep.call_count) + self.assertEqual(1, mock_monitor.call_count) + self.assertEqual(1, mock_env.call_count) + self.assertEqual(1, mock_exit.call_count) + return + + def test_run(self): + self._test_run() + return + + def test_run_keeps_running(self): + self._test_run(invocations=15) + return + + def test_run_stops_if_update_available(self): + self.update_handler._ensure_latest_agent = Mock(return_value=True) + self._test_run(invocations=0, calls=[], enable_updates=True) + return + + def test_set_agents_sets_agents(self): + self.prepare_agents() + + self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()]) + self.assertTrue(len(self.update_handler.agents) > 0) + self.assertEqual(len(self.agent_dirs()), len(self.update_handler.agents)) + return + + def test_set_agents_sorts_agents(self): + self.prepare_agents() + + self.update_handler._set_agents([GuestAgent(path=path) for path in self.agent_dirs()]) + + v = FlexibleVersion("100000") + for a in self.update_handler.agents: + self.assertTrue(v > a.version) + v = a.version + return + + +class ChildMock(Mock): + def __init__(self, return_value=0, side_effect=None): + Mock.__init__(self, return_value=return_value, side_effect=side_effect) + + self.poll = Mock(return_value=return_value, 
side_effect=side_effect) + self.wait = Mock(return_value=return_value, side_effect=side_effect) + return + + +class ProtocolMock(object): + def __init__(self, family="TestAgent", etag=42, versions=None): + self.family = family + self.etag = etag + self.versions = versions if versions is not None else [] + self.create_manifests() + self.create_packages() + return + + def create_manifests(self): + self.agent_manifests = VMAgentManifestList() + if len(self.versions) <= 0: + return + + if self.family is not None: + manifest = VMAgentManifest(family=self.family) + for i in range(0,10): + manifest_uri = "https://nowhere.msft/agent/{0}".format(i) + manifest.versionsManifestUris.append(VMAgentManifestUri(uri=manifest_uri)) + self.agent_manifests.vmAgentManifests.append(manifest) + return + + def create_packages(self): + self.agent_packages = ExtHandlerPackageList() + if len(self.versions) <= 0: + return + + for version in self.versions: + package = ExtHandlerPackage(str(version)) + for i in range(0,5): + package_uri = "https://nowhere.msft/agent_pkg/{0}".format(i) + package.uris.append(ExtHandlerPackageUri(uri=package_uri)) + self.agent_packages.versions.append(package) + return + + def get_protocol(self): + return self + + def get_vmagent_manifests(self): + return self.agent_manifests, self.etag + + def get_vmagent_pkgs(self, manifest): + return self.agent_packages + + +class ResponseMock(Mock): + def __init__(self, status=restutil.httpclient.OK, response=None): + Mock.__init__(self) + self.status = status + self.response = response + return + + def read(self): + return self.response + + +class TimeMock(Mock): + def __init__(self, time_increment=1): + Mock.__init__(self) + self.next_time = time.time() + self.time_call_count = 0 + self.time_increment = time_increment + + self.sleep = Mock(return_value=0) + return + + def time(self): + self.time_call_count += 1 + current_time = self.next_time + self.next_time += self.time_increment + return current_time + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/pa/__init__.py b/tests/pa/__init__.py new file mode 100644 index 0000000..2ef4c16 --- /dev/null +++ b/tests/pa/__init__.py @@ -0,0 +1,16 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# diff --git a/tests/distro/test_provision.py b/tests/pa/test_provision.py index 60249ce..6508017 100644 --- a/tests/distro/test_provision.py +++ b/tests/pa/test_provision.py @@ -14,33 +14,33 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * -from azurelinuxagent.distro.loader import get_distro -from azurelinuxagent.distro.default.protocolUtil import * -import azurelinuxagent.utils.fileutil as fileutil +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.protocol import OVF_FILE_NAME +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.pa.provision import get_provision_handler class TestProvision(AgentTestCase): - + @distros("redhat") def test_provision(self, distro_name, distro_version, distro_full_name): - distro = get_distro(distro_name, distro_version, distro_full_name) - distro.osutil = MagicMock() - distro.osutil.decode_customdata = Mock(return_value="") - - distro.protocol_util.detect_protocol_by_file = MagicMock() - distro.protocol_util.get_protocol = MagicMock() + provision_handler = get_provision_handler(distro_name, distro_version, + distro_full_name) + mock_osutil = MagicMock() + mock_osutil.decode_customdata = Mock(return_value="") + + provision_handler.osutil = mock_osutil + provision_handler.protocol_util.osutil = mock_osutil + provision_handler.protocol_util.get_protocol_by_file = MagicMock() + provision_handler.protocol_util.get_protocol = MagicMock() + conf.get_dvd_mount_point = Mock(return_value=self.tmp_dir) - ovfenv_file = os.path.join(self.tmp_dir, OVF_FILE_NAME) ovfenv_data = load_data("ovf-env.xml") fileutil.write_file(ovfenv_file, ovfenv_data) - handler = distro.provision_handler - handler.run() + provision_handler.run() if __name__ == '__main__': unittest.main() diff --git a/tests/protocol/__init__.py b/tests/protocol/__init__.py index 9bdb27e..2ef4c16 100644 --- a/tests/protocol/__init__.py +++ b/tests/protocol/__init__.py @@ -14,6 +14,3 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx diff --git a/tests/protocol/mockmetadata.py b/tests/protocol/mockmetadata.py index 0f7b568..dce3367 100644 --- a/tests/protocol/mockmetadata.py +++ b/tests/protocol/mockmetadata.py @@ -14,13 +14,10 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * -from azurelinuxagent.future import httpclient -from azurelinuxagent.utils.cryptutil import CryptUtil +from azurelinuxagent.common.future import httpclient +from azurelinuxagent.common.utils.cryptutil import CryptUtil DATA_FILE = { "identity": "metadata/identity.json", diff --git a/tests/protocol/mockwiredata.py b/tests/protocol/mockwiredata.py index 6ffd19c..c789de5 100644 --- a/tests/protocol/mockwiredata.py +++ b/tests/protocol/mockwiredata.py @@ -14,13 +14,10 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# 
http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * -from azurelinuxagent.future import httpclient -from azurelinuxagent.utils.cryptutil import CryptUtil +from azurelinuxagent.common.future import httpclient +from azurelinuxagent.common.utils.cryptutil import CryptUtil DATA_FILE = { "version_info": "wire/version_info.xml", @@ -30,6 +27,7 @@ DATA_FILE = { "certs": "wire/certs.xml", "ext_conf": "wire/ext_conf.xml", "manifest": "wire/manifest.xml", + "ga_manifest" : "wire/ga_manifest.xml", "trans_prv": "wire/trans_prv", "trans_cert": "wire/trans_cert", "test_ext": "ext/sample_ext.zip" @@ -44,6 +42,15 @@ DATA_FILE_EXT_NO_SETTINGS["ext_conf"] = "wire/ext_conf_no_settings.xml" DATA_FILE_EXT_NO_PUBLIC = DATA_FILE.copy() DATA_FILE_EXT_NO_PUBLIC["ext_conf"] = "wire/ext_conf_no_public.xml" +DATA_FILE_EXT_AUTOUPGRADE = DATA_FILE.copy() +DATA_FILE_EXT_AUTOUPGRADE["ext_conf"] = "wire/ext_conf_autoupgrade.xml" + +DATA_FILE_EXT_INTERNALVERSION = DATA_FILE.copy() +DATA_FILE_EXT_INTERNALVERSION["ext_conf"] = "wire/ext_conf_internalversion.xml" + +DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION = DATA_FILE.copy() +DATA_FILE_EXT_AUTOUPGRADE_INTERNALVERSION["ext_conf"] = "wire/ext_conf_autoupgrade_internalversion.xml" + class WireProtocolData(object): def __init__(self, data_files=DATA_FILE): self.version_info = load_data(data_files.get("version_info")) @@ -53,6 +60,7 @@ class WireProtocolData(object): self.certs = load_data(data_files.get("certs")) self.ext_conf = load_data(data_files.get("ext_conf")) self.manifest = load_data(data_files.get("manifest")) + self.ga_manifest = load_data(data_files.get("ga_manifest")) self.trans_prv = load_data(data_files.get("trans_prv")) self.trans_cert = load_data(data_files.get("trans_cert")) self.ext = load_bin_data(data_files.get("test_ext")) @@ -73,6 +81,8 @@ class WireProtocolData(object): content = self.ext_conf elif "manifest.xml" in url: content = self.manifest + elif "manifest_of_ga.xml" in url: + content = self.ga_manifest elif "ExampleHandlerLinux" in url: content = self.ext resp = MagicMock() diff --git a/tests/protocol/test_hostplugin.py b/tests/protocol/test_hostplugin.py new file mode 100644 index 0000000..65c8465 --- /dev/null +++ b/tests/protocol/test_hostplugin.py @@ -0,0 +1,85 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from tests.tools import * +import unittest +import azurelinuxagent.common.protocol.wire as wire +import azurelinuxagent.common.protocol.restapi as restapi + +wireserver_url = "168.63.129.16" +sas_url = "http://sas_url" +api_versions = '["2015-09-01"]' + + +class TestHostPlugin(AgentTestCase): + def test_fallback(self): + with patch.object(wire.HostPluginProtocol, + "put_vm_status") as patch_put: + with patch.object(wire.StatusBlob, "upload") as patch_upload: + patch_upload.return_value = False + wire_protocol_client = wire.WireProtocol(wireserver_url).client + wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) + wire_protocol_client.ext_conf.status_upload_blob = sas_url + wire_protocol_client.upload_status_blob() + self.assertTrue(patch_put.call_count == 1, + "Fallback was not engaged") + self.assertTrue(patch_put.call_args[0][1] == sas_url) + + def test_no_fallback(self): + with patch.object(wire.HostPluginProtocol, + "put_vm_status") as patch_put: + with patch.object(wire.StatusBlob, "upload") as patch_upload: + patch_upload.return_value = True + wire_protocol_client = wire.WireProtocol(wireserver_url).client + wire_protocol_client.ext_conf = wire.ExtensionsConfig(None) + wire_protocol_client.ext_conf.status_upload_blob = sas_url + wire_protocol_client.upload_status_blob() + self.assertTrue(patch_put.call_count == 0, + "Fallback was engaged") + + def test_init_put(self): + expected_url = "http://168.63.129.16:32526/status" + expected_headers = {'x-ms-version': '2015-09-01'} + expected_content = '{"content": "b2s=", ' \ + '"headers": [{"headerName": "x-ms-version", ' \ + '"headerValue": "2014-02-14"}, ' \ + '{"headerName": "x-ms-blob-type", "headerValue": ' \ + '"BlockBlob"}], ' \ + '"requestUri": "http://sas_url"}' + + host_client = wire.HostPluginProtocol(wireserver_url) + self.assertFalse(host_client.is_initialized) + self.assertTrue(host_client.api_versions is None) + status_blob = wire.StatusBlob(None) + status_blob.vm_status = "ok" + status_blob.type = "BlockBlob" + with patch.object(wire.HostPluginProtocol, + "get_api_versions") as patch_get: + patch_get.return_value = api_versions + with patch.object(restapi.restutil, "http_put") as patch_put: + patch_put.return_value = MagicMock() + host_client.put_vm_status(status_blob, sas_url) + self.assertTrue(host_client.is_initialized) + self.assertFalse(host_client.api_versions is None) + self.assertTrue(patch_put.call_count == 1) + self.assertTrue(patch_put.call_args[0][0] == expected_url) + self.assertTrue(patch_put.call_args[0][1] == expected_content) + self.assertTrue(patch_put.call_args[0][2] == expected_headers) + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/protocol/test_metadata.py b/tests/protocol/test_metadata.py index fca1a82..e2ef57a 100644 --- a/tests/protocol/test_metadata.py +++ b/tests/protocol/test_metadata.py @@ -14,17 +14,14 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * from tests.protocol.mockmetadata import * -from azurelinuxagent.utils.restutil import httpclient -from azurelinuxagent.protocol.metadata import MetadataProtocol +from azurelinuxagent.common.utils.restutil import httpclient +from azurelinuxagent.common.protocol.metadata import MetadataProtocol @patch("time.sleep") 
-@patch("azurelinuxagent.protocol.metadata.restutil") +@patch("azurelinuxagent.common.protocol.metadata.restutil") class TestWireProtocolGetters(AgentTestCase): def _test_getters(self, test_data, mock_restutil ,_): mock_restutil.http_get.side_effect = test_data.mock_http_get @@ -33,7 +30,7 @@ class TestWireProtocolGetters(AgentTestCase): protocol.detect() protocol.get_vminfo() protocol.get_certs() - ext_handlers, etag= protocol.get_ext_handlers() + ext_handlers, etag = protocol.get_ext_handlers() for ext_handler in ext_handlers.extHandlers: protocol.get_ext_handler_pkgs(ext_handler) diff --git a/tests/distro/test_protocol_util.py b/tests/protocol/test_protocol_util.py index 61339f3..cb9a06f 100644 --- a/tests/distro/test_protocol_util.py +++ b/tests/protocol/test_protocol_util.py @@ -14,72 +14,64 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * -from azurelinuxagent.distro.loader import get_distro -from azurelinuxagent.exception import * -from azurelinuxagent.distro.default.protocolUtil import * +from azurelinuxagent.common.exception import * +from azurelinuxagent.common.protocol import get_protocol_util, \ + TAG_FILE_NAME @patch("time.sleep") class TestProtocolUtil(AgentTestCase): - @distros() - @patch("azurelinuxagent.distro.default.protocolUtil.MetadataProtocol") - @patch("azurelinuxagent.distro.default.protocolUtil.WireProtocol") - def test_detect_protocol(self, distro_name, distro_version, distro_full_name, - WireProtocol, MetadataProtocol, _, *distro_args): - + @patch("azurelinuxagent.common.protocol.util.MetadataProtocol") + @patch("azurelinuxagent.common.protocol.util.WireProtocol") + def test_detect_protocol(self, WireProtocol, MetadataProtocol, _): WireProtocol.return_value = MagicMock() MetadataProtocol.return_value = MagicMock() + + protocol_util = get_protocol_util() - distro = get_distro(distro_name, distro_version, distro_full_name) - distro.dhcp_handler = MagicMock() - distro.dhcp_handler.endpoint = "foo.bar" + protocol_util.dhcp_handler = MagicMock() + protocol_util.dhcp_handler.endpoint = "foo.bar" #Test wire protocol is available - protocol = distro.protocol_util.detect_protocol() + protocol = protocol_util.get_protocol() self.assertEquals(WireProtocol.return_value, protocol) #Test wire protocol is not available - distro.protocol_util.protocol = None - WireProtocol.side_effect = ProtocolError() + protocol_util.clear_protocol() + WireProtocol.return_value.detect.side_effect = ProtocolError() - protocol = distro.protocol_util.detect_protocol() + protocol = protocol_util.get_protocol() self.assertEquals(MetadataProtocol.return_value, protocol) #Test no protocol is available - distro.protocol_util.protocol = None - WireProtocol.side_effect = ProtocolError() - MetadataProtocol.side_effect = ProtocolError() - self.assertRaises(ProtocolError, distro.protocol_util.detect_protocol) + protocol_util.clear_protocol() + WireProtocol.return_value.detect.side_effect = ProtocolError() - @distros() - def test_detect_protocol_by_file(self, distro_name, distro_version, - distro_full_name, _): - distro = get_distro(distro_name, distro_version, distro_full_name) - protocol_util = distro.protocol_util + MetadataProtocol.return_value.detect.side_effect = ProtocolError() + self.assertRaises(ProtocolError, protocol_util.get_protocol) + def test_detect_protocol_by_file(self, _): + 
protocol_util = get_protocol_util() protocol_util._detect_wire_protocol = Mock() protocol_util._detect_metadata_protocol = Mock() tag_file = os.path.join(self.tmp_dir, TAG_FILE_NAME) #Test tag file doesn't exist - protocol_util.detect_protocol_by_file() + protocol_util.get_protocol_by_file() protocol_util._detect_wire_protocol.assert_any_call() protocol_util._detect_metadata_protocol.assert_not_called() #Test tag file exists - protocol_util.protocol = None + protocol_util.clear_protocol() protocol_util._detect_wire_protocol.reset_mock() protocol_util._detect_metadata_protocol.reset_mock() with open(tag_file, "w+") as tag_fd: tag_fd.write("") - protocol_util.detect_protocol_by_file() + protocol_util.get_protocol_by_file() protocol_util._detect_metadata_protocol.assert_any_call() protocol_util._detect_wire_protocol.assert_not_called() diff --git a/tests/protocol/test_restapi.py b/tests/protocol/test_restapi.py index 656ecc6..e4b65c9 100644 --- a/tests/protocol/test_restapi.py +++ b/tests/protocol/test_restapi.py @@ -14,9 +14,6 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * import uuid @@ -24,7 +21,7 @@ import unittest import os import shutil import time -from azurelinuxagent.protocol.restapi import * +from azurelinuxagent.common.protocol.restapi import * class SampleDataContract(DataContract): def __init__(self): diff --git a/tests/protocol/test_wire.py b/tests/protocol/test_wire.py index 4c38c13..bd3acaf 100644 --- a/tests/protocol/test_wire.py +++ b/tests/protocol/test_wire.py @@ -14,9 +14,6 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * from tests.protocol.mockwiredata import * @@ -24,18 +21,18 @@ import uuid import unittest import os import time -from azurelinuxagent.utils.restutil import httpclient -from azurelinuxagent.utils.cryptutil import CryptUtil -from azurelinuxagent.protocol.restapi import * -from azurelinuxagent.protocol.wire import WireClient, WireProtocol, \ +from azurelinuxagent.common.utils.restutil import httpclient +from azurelinuxagent.common.utils.cryptutil import CryptUtil +from azurelinuxagent.common.protocol.restapi import * +from azurelinuxagent.common.protocol.wire import WireClient, WireProtocol, \ TRANSPORT_PRV_FILE_NAME, \ TRANSPORT_CERT_FILE_NAME data_with_bom = b'\xef\xbb\xbfhehe' @patch("time.sleep") -@patch("azurelinuxagent.protocol.wire.CryptUtil") -@patch("azurelinuxagent.protocol.wire.restutil") +@patch("azurelinuxagent.common.protocol.wire.CryptUtil") +@patch("azurelinuxagent.common.protocol.wire.restutil") class TestWireProtocolGetters(AgentTestCase): def _test_getters(self, test_data, mock_restutil, MockCryptUtil, _): diff --git a/tests/test_import.py b/tests/test_import.py new file mode 100644 index 0000000..39a48ab --- /dev/null +++ b/tests/test_import.py @@ -0,0 +1,26 @@ +from tests.tools import * +import azurelinuxagent.common.osutil as osutil +import azurelinuxagent.common.dhcp as dhcp +import azurelinuxagent.common.protocol as protocol +import azurelinuxagent.pa.provision as provision +import azurelinuxagent.pa.deprovision as deprovision +import azurelinuxagent.daemon as daemon +import azurelinuxagent.daemon.resourcedisk as 
resourcedisk +import azurelinuxagent.daemon.scvmm as scvmm +import azurelinuxagent.ga.exthandlers as exthandlers +import azurelinuxagent.ga.monitor as monitor +import azurelinuxagent.ga.update as update + +class TestImportHandler(AgentTestCase): + def test_get_handler(self): + osutil.get_osutil() + protocol.get_protocol_util() + dhcp.get_dhcp_handler() + provision.get_provision_handler() + deprovision.get_deprovision_handler() + daemon.get_daemon_handler() + resourcedisk.get_resourcedisk_handler() + scvmm.get_scvmm_handler() + monitor.get_monitor_handler() + update.get_update_handler() + exthandlers.get_exthandlers_handler() diff --git a/tests/tools.py b/tests/tools.py index 672c60b..8bf23ed 100644 --- a/tests/tools.py +++ b/tests/tools.py @@ -14,31 +14,31 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx """ Define util functions for unit test """ -import re +import json import os -import sys -import unittest +import re import shutil -import json +import sys import tempfile +import unittest + from functools import wraps -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -import azurelinuxagent.event as event + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.event as event +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.version import PY_VERSION_MAJOR #Import mock module for Python2 and Python3 try: - from unittest.mock import Mock, patch, MagicMock + from unittest.mock import Mock, patch, MagicMock, DEFAULT, call except ImportError: - from mock import Mock, patch, MagicMock + from mock import Mock, patch, MagicMock, DEFAULT, call test_dir = os.path.dirname(os.path.abspath(__file__)) data_dir = os.path.join(test_dir, "data") @@ -56,6 +56,7 @@ class AgentTestCase(unittest.TestCase): def setUp(self): prefix = "{0}_".format(self.__class__.__name__) self.tmp_dir = tempfile.mkdtemp(prefix=prefix) + conf.get_autoupdate_enabled = Mock(return_value=True) conf.get_lib_dir = Mock(return_value=self.tmp_dir) ext_log_dir = os.path.join(self.tmp_dir, "azure") conf.get_ext_log_dir = Mock(return_value=ext_log_dir) @@ -98,6 +99,12 @@ supported_distro = [ ] +def open_patch(): + open_name = '__builtin__.open' + if PY_VERSION_MAJOR == 3: + open_name = 'builtins.open' + return open_name + def distros(distro_name=".*", distro_version=".*", distro_full_name=".*"): """Run test on multiple distros""" def decorator(test_method): diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py index 9bdb27e..2ef4c16 100644 --- a/tests/utils/__init__.py +++ b/tests/utils/__init__.py @@ -14,6 +14,3 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx diff --git a/tests/utils/test_file_util.py b/tests/utils/test_file_util.py index bf7c638..9a5479e 100644 --- a/tests/utils/test_file_util.py +++ b/tests/utils/test_file_util.py @@ -14,17 +14,14 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * import uuid import unittest import os import sys -from azurelinuxagent.future 
import ustr -import azurelinuxagent.utils.fileutil as fileutil +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.utils.fileutil as fileutil class TestFileOperations(AgentTestCase): def test_read_write_file(self): diff --git a/tests/utils/test_flexible_version.py b/tests/utils/test_flexible_version.py new file mode 100644 index 0000000..1162022 --- /dev/null +++ b/tests/utils/test_flexible_version.py @@ -0,0 +1,477 @@ +import random +import re +import unittest + +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion + +class TestFlexibleVersion(unittest.TestCase): + + def setUp(self): + self.v = FlexibleVersion() + + def test_compile_separator(self): + tests = [ + '.', + '', + '-' + ] + for t in tests: + t_escaped = re.escape(t) + t_re = re.compile(t_escaped) + self.assertEqual((t_escaped, t_re), self.v._compile_separator(t)) + self.assertEqual(('', re.compile('')), self.v._compile_separator(None)) + return + + def test_compile_pattern(self): + self.v._compile_pattern() + tests = { + '1': True, + '1.2': True, + '1.2.3': True, + '1.2.3.4': True, + '1.2.3.4.5': True, + + '1alpha': True, + '1.alpha': True, + '1-alpha': True, + '1alpha0': True, + '1.alpha0': True, + '1-alpha0': True, + '1.2alpha': True, + '1.2.alpha': True, + '1.2-alpha': True, + '1.2alpha0': True, + '1.2.alpha0': True, + '1.2-alpha0': True, + + '1beta': True, + '1.beta': True, + '1-beta': True, + '1beta0': True, + '1.beta0': True, + '1-beta0': True, + '1.2beta': True, + '1.2.beta': True, + '1.2-beta': True, + '1.2beta0': True, + '1.2.beta0': True, + '1.2-beta0': True, + + '1rc': True, + '1.rc': True, + '1-rc': True, + '1rc0': True, + '1.rc0': True, + '1-rc0': True, + '1.2rc': True, + '1.2.rc': True, + '1.2-rc': True, + '1.2rc0': True, + '1.2.rc0': True, + '1.2-rc0': True, + + '1.2.3.4alpha5': True, + + ' 1': False, + 'beta': False, + '1delta0': False, + '': False + } + for test in iter(tests): + expectation = tests[test] + self.assertEqual( + expectation, + self.v.version_re.match(test) is not None, + "test: {0} expected: {1} ".format(test, expectation)) + return + + def test_compile_pattern_sep(self): + self.v.sep = '-' + self.v._compile_pattern() + tests = { + '1': True, + '1-2': True, + '1-2-3': True, + '1-2-3-4': True, + '1-2-3-4-5': True, + + '1alpha': True, + '1-alpha': True, + '1-alpha': True, + '1alpha0': True, + '1-alpha0': True, + '1-alpha0': True, + '1-2alpha': True, + '1-2.alpha': True, + '1-2-alpha': True, + '1-2alpha0': True, + '1-2.alpha0': True, + '1-2-alpha0': True, + + '1beta': True, + '1-beta': True, + '1-beta': True, + '1beta0': True, + '1-beta0': True, + '1-beta0': True, + '1-2beta': True, + '1-2.beta': True, + '1-2-beta': True, + '1-2beta0': True, + '1-2.beta0': True, + '1-2-beta0': True, + + '1rc': True, + '1-rc': True, + '1-rc': True, + '1rc0': True, + '1-rc0': True, + '1-rc0': True, + '1-2rc': True, + '1-2.rc': True, + '1-2-rc': True, + '1-2rc0': True, + '1-2.rc0': True, + '1-2-rc0': True, + + '1-2-3-4alpha5': True, + + ' 1': False, + 'beta': False, + '1delta0': False, + '': False + } + for test in iter(tests): + expectation = tests[test] + self.assertEqual( + expectation, + self.v.version_re.match(test) is not None, + "test: {0} expected: {1} ".format(test, expectation)) + return + + def test_compile_pattern_prerel(self): + self.v.prerel_tags = ('a', 'b', 'c') + self.v._compile_pattern() + tests = { + '1': True, + '1.2': True, + '1.2.3': True, + '1.2.3.4': True, + '1.2.3.4.5': True, + + '1a': True, + '1.a': True, + '1-a': True, + '1a0': True, + '1.a0': 
True, + '1-a0': True, + '1.2a': True, + '1.2.a': True, + '1.2-a': True, + '1.2a0': True, + '1.2.a0': True, + '1.2-a0': True, + + '1b': True, + '1.b': True, + '1-b': True, + '1b0': True, + '1.b0': True, + '1-b0': True, + '1.2b': True, + '1.2.b': True, + '1.2-b': True, + '1.2b0': True, + '1.2.b0': True, + '1.2-b0': True, + + '1c': True, + '1.c': True, + '1-c': True, + '1c0': True, + '1.c0': True, + '1-c0': True, + '1.2c': True, + '1.2.c': True, + '1.2-c': True, + '1.2c0': True, + '1.2.c0': True, + '1.2-c0': True, + + '1.2.3.4a5': True, + + ' 1': False, + '1.2.3.4alpha5': False, + 'beta': False, + '1delta0': False, + '': False + } + for test in iter(tests): + expectation = tests[test] + self.assertEqual( + expectation, + self.v.version_re.match(test) is not None, + "test: {0} expected: {1} ".format(test, expectation)) + return + + def test_ensure_compatible_separators(self): + v1 = FlexibleVersion('1.2.3') + v2 = FlexibleVersion('1-2-3', sep='-') + try: + v1 == v2 + self.assertTrue(False, "Incompatible separators failed to raise an exception") + except ValueError: + pass + except Exception as e: + t = e.__class__.__name__ + self.assertTrue(False, "Incompatible separators raised an unexpected exception: {0}" \ + .format(t)) + return + + def test_ensure_compatible_prerel(self): + v1 = FlexibleVersion('1.2.3', prerel_tags=('alpha', 'beta', 'rc')) + v2 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b', 'c')) + try: + v1 == v2 + self.assertTrue(False, "Incompatible prerel_tags failed to raise an exception") + except ValueError: + pass + except Exception as e: + t = e.__class__.__name__ + self.assertTrue(False, "Incompatible prerel_tags raised an unexpected exception: {0}" \ + .format(t)) + return + + def test_ensure_compatible_prerel_length(self): + v1 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b', 'c')) + v2 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b')) + try: + v1 == v2 + self.assertTrue(False, "Incompatible prerel_tags failed to raise an exception") + except ValueError: + pass + except Exception as e: + t = e.__class__.__name__ + self.assertTrue(False, "Incompatible prerel_tags raised an unexpected exception: {0}" \ + .format(t)) + return + + def test_ensure_compatible_prerel_order(self): + v1 = FlexibleVersion('1.2.3', prerel_tags=('a', 'b')) + v2 = FlexibleVersion('1.2.3', prerel_tags=('b', 'a')) + try: + v1 == v2 + self.assertTrue(False, "Incompatible prerel_tags failed to raise an exception") + except ValueError: + pass + except Exception as e: + t = e.__class__.__name__ + self.assertTrue(False, "Incompatible prerel_tags raised an unexpected exception: {0}" \ + .format(t)) + return + + def test_major(self): + tests = { + '1' : 1, + '1.2' : 1, + '1.2.3' : 1, + '1.2.3.4' : 1 + } + for test in iter(tests): + expectation = tests[test] + self.assertEqual( + expectation, + FlexibleVersion(test).major) + return + + def test_minor(self): + tests = { + '1' : 0, + '1.2' : 2, + '1.2.3' : 2, + '1.2.3.4' : 2 + } + for test in iter(tests): + expectation = tests[test] + self.assertEqual( + expectation, + FlexibleVersion(test).minor) + return + + def test_patch(self): + tests = { + '1' : 0, + '1.2' : 0, + '1.2.3' : 3, + '1.2.3.4' : 3 + } + for test in iter(tests): + expectation = tests[test] + self.assertEqual( + expectation, + FlexibleVersion(test).patch) + return + + def test_parse(self): + tests = { + "1.2.3.4": ((1, 2, 3, 4), None), + "1.2.3.4alpha5": ((1, 2, 3, 4), ('alpha', 5)), + "1.2.3.4-alpha5": ((1, 2, 3, 4), ('alpha', 5)), + "1.2.3.4.alpha5": ((1, 2, 3, 4), ('alpha', 5)) + } + for test in 
iter(tests): + expectation = tests[test] + self.v._parse(test) + self.assertEqual(expectation, (self.v.version, self.v.prerelease)) + return + + def test_decrement(self): + src_v = FlexibleVersion('1.0.0.0.10') + dst_v = FlexibleVersion(str(src_v)) + for i in range(1,10): + dst_v -= 1 + self.assertEqual(i, src_v.version[-1] - dst_v.version[-1]) + return + + def test_decrement_disallows_below_zero(self): + try: + FlexibleVersion('1.0') - 1 + self.assertTrue(False, "Decrement failed to raise an exception") + except ArithmeticError: + pass + except Exception as e: + t = e.__class__.__name__ + self.assertTrue(False, "Decrement raised an unexpected exception: {0}".format(t)) + return + + def test_increment(self): + src_v = FlexibleVersion('1.0.0.0.0') + dst_v = FlexibleVersion(str(src_v)) + for i in range(1,10): + dst_v += 1 + self.assertEqual(i, dst_v.version[-1] - src_v.version[-1]) + return + + def test_str(self): + tests = [ + '1', + '1.2', + '1.2.3', + '1.2.3.4', + '1.2.3.4.5', + + '1alpha', + '1.alpha', + '1-alpha', + '1alpha0', + '1.alpha0', + '1-alpha0', + '1.2alpha', + '1.2.alpha', + '1.2-alpha', + '1.2alpha0', + '1.2.alpha0', + '1.2-alpha0', + + '1beta', + '1.beta', + '1-beta', + '1beta0', + '1.beta0', + '1-beta0', + '1.2beta', + '1.2.beta', + '1.2-beta', + '1.2beta0', + '1.2.beta0', + '1.2-beta0', + + '1rc', + '1.rc', + '1-rc', + '1rc0', + '1.rc0', + '1-rc0', + '1.2rc', + '1.2.rc', + '1.2-rc', + '1.2rc0', + '1.2.rc0', + '1.2-rc0', + + '1.2.3.4alpha5', + ] + for test in tests: + self.assertEqual(test, str(FlexibleVersion(test))) + return + + def test_repr(self): + v = FlexibleVersion('1,2,3rc4', ',', ['lol', 'rc']) + expected = "FlexibleVersion ('1,2,3rc4', ',', ('lol', 'rc'))" + self.assertEqual(expected, repr(v)) + + def test_order(self): + test0 = ["1.7.0", "1.7.0rc0", "1.11.0"] + expected0 = ['1.7.0rc0', '1.7.0', '1.11.0'] + self.assertEqual(expected0, list(map(str, sorted([FlexibleVersion(v) for v in test0])))) + + test1 = [ + '2.0.2rc2', + '2.2.0beta3', + '2.0.10', + '2.1.0alpha42', + '2.0.2beta4', + '2.1.1', + '2.0.1', + '2.0.2rc3', + '2.2.0', + '2.0.0', + '3.0.1', + '2.1.0rc1' + ] + expected1 = [ + '2.0.0', + '2.0.1', + '2.0.2beta4', + '2.0.2rc2', + '2.0.2rc3', + '2.0.10', + '2.1.0alpha42', + '2.1.0rc1', + '2.1.1', + '2.2.0beta3', + '2.2.0', + '3.0.1' + ] + self.assertEqual(expected1, list(map(str, sorted([FlexibleVersion(v) for v in test1])))) + + self.assertEqual(FlexibleVersion("1.0.0.0.0.0.0.0"), FlexibleVersion("1")) + + self.assertFalse(FlexibleVersion("1.0") > FlexibleVersion("1.0")) + self.assertFalse(FlexibleVersion("1.0") < FlexibleVersion("1.0")) + + self.assertTrue(FlexibleVersion("1.0") < FlexibleVersion("1.1")) + self.assertTrue(FlexibleVersion("1.9") < FlexibleVersion("1.10")) + self.assertTrue(FlexibleVersion("1.9.9") < FlexibleVersion("1.10.0")) + self.assertTrue(FlexibleVersion("1.0.0.0") < FlexibleVersion("1.2.0.0")) + + self.assertTrue(FlexibleVersion("1.1") > FlexibleVersion("1.0")) + self.assertTrue(FlexibleVersion("1.10") > FlexibleVersion("1.9")) + self.assertTrue(FlexibleVersion("1.10.0") > FlexibleVersion("1.9.9")) + self.assertTrue(FlexibleVersion("1.2.0.0") > FlexibleVersion("1.0.0.0")) + + self.assertTrue(FlexibleVersion("1.0") <= FlexibleVersion("1.1")) + self.assertTrue(FlexibleVersion("1.1") > FlexibleVersion("1.0")) + self.assertTrue(FlexibleVersion("1.1") >= FlexibleVersion("1.0")) + + self.assertTrue(FlexibleVersion("1.0") == FlexibleVersion("1.0")) + self.assertTrue(FlexibleVersion("1.0") >= FlexibleVersion("1.0")) + 
self.assertTrue(FlexibleVersion("1.0") <= FlexibleVersion("1.0")) + + self.assertFalse(FlexibleVersion("1.0") != FlexibleVersion("1.0")) + self.assertTrue(FlexibleVersion("1.1") != FlexibleVersion("1.0")) + return + + +if __name__ == '__main__': + unittest.main() diff --git a/tests/utils/test_rest_util.py b/tests/utils/test_rest_util.py index bd22c55..874e527 100644 --- a/tests/utils/test_rest_util.py +++ b/tests/utils/test_rest_util.py @@ -14,17 +14,14 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import AgentTestCase, patch, Mock, MagicMock import uuid import unittest import os -import azurelinuxagent.utils.restutil as restutil -from azurelinuxagent.future import ustr, httpclient -import azurelinuxagent.logger as logger +import azurelinuxagent.common.utils.restutil as restutil +from azurelinuxagent.common.future import ustr, httpclient +import azurelinuxagent.common.logger as logger class TestHttpOperations(AgentTestCase): @@ -57,8 +54,8 @@ class TestHttpOperations(AgentTestCase): self.assertEquals(rel_uri, "None") - @patch("azurelinuxagent.future.httpclient.HTTPSConnection") - @patch("azurelinuxagent.future.httpclient.HTTPConnection") + @patch("azurelinuxagent.common.future.httpclient.HTTPSConnection") + @patch("azurelinuxagent.common.future.httpclient.HTTPConnection") def test_http_request(self, HTTPConnection, HTTPSConnection): mock_httpconn = MagicMock() mock_httpresp = MagicMock() @@ -98,7 +95,7 @@ class TestHttpOperations(AgentTestCase): self.assertEquals("_(:3| <)_", resp.read()) @patch("time.sleep") - @patch("azurelinuxagent.utils.restutil._http_request") + @patch("azurelinuxagent.common.utils.restutil._http_request") def test_http_request_with_retry(self, _http_request, sleep): mock_httpresp = MagicMock() mock_httpresp.read = Mock(return_value="hehe") diff --git a/tests/utils/test_shell_util.py b/tests/utils/test_shell_util.py index aa89121..57e6c33 100644 --- a/tests/utils/test_shell_util.py +++ b/tests/utils/test_shell_util.py @@ -15,15 +15,12 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * import uuid import unittest import os -import azurelinuxagent.utils.shellutil as shellutil +import azurelinuxagent.common.utils.shellutil as shellutil import test class TestrunCmd(AgentTestCase): @@ -38,5 +35,10 @@ class TestrunCmd(AgentTestCase): err = shellutil.run_get_output(u"ls 我") self.assertNotEquals(0, err[0]) + def test_shellquote(self): + self.assertEqual("\'foo\'", shellutil.quote("foo")) + self.assertEqual("\'foo bar\'", shellutil.quote("foo bar")) + self.assertEqual("'foo'\\''bar'", shellutil.quote("foo\'bar")) + if __name__ == '__main__': unittest.main() diff --git a/tests/utils/test_text_util.py b/tests/utils/test_text_util.py index 0e8cc7d..9ac0707 100644 --- a/tests/utils/test_text_util.py +++ b/tests/utils/test_text_util.py @@ -14,17 +14,14 @@ # # Requires Python 2.4+ and Openssl 1.0+ # -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx from tests.tools import * import uuid import unittest import os -from azurelinuxagent.future 
import ustr
-import azurelinuxagent.utils.textutil as textutil
-from azurelinuxagent.utils.textutil import Version
+from azurelinuxagent.common.future import ustr
+import azurelinuxagent.common.utils.textutil as textutil
+from azurelinuxagent.common.utils.textutil import Version

class TestTextUtil(AgentTestCase):
    def test_get_password_hash(self):
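The new tests/utils/test_flexible_version.py above is the only documentation this diff gives for the FlexibleVersion helper added in 2.1.5. The sketch below is not part of the commit; it is a minimal usage example inferred solely from the assertions in those tests. The import path is the one the tests themselves use, and the outputs noted in comments are the values the tests assert, not independently verified behaviour.

```python
# Minimal sketch of the FlexibleVersion API as exercised by the new tests.
# Assumes the 2.1.5 source tree is on sys.path.
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion

# Parsing: components and an optional prerelease tag (alpha/beta/rc by default).
v = FlexibleVersion("1.2.3.4alpha5")
print(v.major, v.minor, v.patch)   # tests assert: 1 2 3
print(v.version, v.prerelease)     # tests assert: (1, 2, 3, 4) ('alpha', 5)

# Ordering: prerelease versions sort before the corresponding final release,
# and components compare numerically rather than lexically.
versions = [FlexibleVersion(s) for s in ("1.11.0", "1.7.0", "1.7.0rc0")]
print([str(x) for x in sorted(versions)])   # tests assert: ['1.7.0rc0', '1.7.0', '1.11.0']

# Custom separators (and custom prerelease tag sets) are supported, but two
# versions must use compatible settings; mixing them raises ValueError.
w = FlexibleVersion("1-2-3", sep="-")
try:
    w == FlexibleVersion("1.2.3")
except ValueError:
    pass  # expected per test_ensure_compatible_separators

# Increment/decrement adjust the least-significant component; decrementing
# below zero raises ArithmeticError.
x = FlexibleVersion("2.1.0")
x += 1
print(str(x))   # expected: 2.1.1
```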