From 5009a9d0f3606fc08a80ec0d59076d8dc48d2f25 Mon Sep 17 00:00:00 2001 From: Daniel Watkins Date: Tue, 13 Sep 2016 16:11:47 +0100 Subject: Import patches-unapplied version 2.1.5-0ubuntu1 to ubuntu/yakkety-proposed Imported using git-ubuntu import. Changelog parent: 0f7cef5b52162d1ebb31a738bd8fc9febe1fbda6 New changelog entries: * New upstream release (LP: #1603581) - d/patches/disable-auto-update.patch: - The new version introduces auto-updating of the agent to its latest version via an internal mechanism; disable this - d/patches/fix_shebangs.patch: - Dropped in favour of the dh_python3 --shebang option. - Refreshed d/patches/disable_udev_overrides.patch --- azurelinuxagent/__init__.py | 1 - azurelinuxagent/agent.py | 99 +- azurelinuxagent/common/__init__.py | 17 + azurelinuxagent/common/conf.py | 181 ++++ azurelinuxagent/common/dhcp.py | 400 +++++++ azurelinuxagent/common/event.py | 124 +++ azurelinuxagent/common/exception.py | 123 +++ azurelinuxagent/common/future.py | 31 + azurelinuxagent/common/logger.py | 156 +++ azurelinuxagent/common/osutil/__init__.py | 18 + azurelinuxagent/common/osutil/coreos.py | 92 ++ azurelinuxagent/common/osutil/debian.py | 47 + azurelinuxagent/common/osutil/default.py | 792 ++++++++++++++ azurelinuxagent/common/osutil/factory.py | 69 ++ azurelinuxagent/common/osutil/freebsd.py | 198 ++++ azurelinuxagent/common/osutil/redhat.py | 122 +++ azurelinuxagent/common/osutil/suse.py | 108 ++ azurelinuxagent/common/osutil/ubuntu.py | 66 ++ azurelinuxagent/common/protocol/__init__.py | 21 + azurelinuxagent/common/protocol/hostplugin.py | 124 +++ azurelinuxagent/common/protocol/metadata.py | 223 ++++ azurelinuxagent/common/protocol/ovfenv.py | 113 ++ azurelinuxagent/common/protocol/restapi.py | 272 +++++ azurelinuxagent/common/protocol/util.py | 285 +++++ azurelinuxagent/common/protocol/wire.py | 1218 ++++++++++++++++++++++ azurelinuxagent/common/rdma.py | 280 +++++ azurelinuxagent/common/utils/__init__.py | 17 + azurelinuxagent/common/utils/cryptutil.py | 121 +++ azurelinuxagent/common/utils/fileutil.py | 171 +++ azurelinuxagent/common/utils/flexible_version.py | 199 ++++ azurelinuxagent/common/utils/restutil.py | 156 +++ azurelinuxagent/common/utils/shellutil.py | 107 ++ azurelinuxagent/common/utils/textutil.py | 279 +++++ azurelinuxagent/common/version.py | 116 +++ azurelinuxagent/conf.py | 166 --- azurelinuxagent/daemon/__init__.py | 18 + azurelinuxagent/daemon/main.py | 130 +++ azurelinuxagent/daemon/resourcedisk/__init__.py | 20 + azurelinuxagent/daemon/resourcedisk/default.py | 219 ++++ azurelinuxagent/daemon/resourcedisk/factory.py | 33 + azurelinuxagent/daemon/resourcedisk/freebsd.py | 117 +++ azurelinuxagent/daemon/scvmm.py | 74 ++ azurelinuxagent/distro/__init__.py | 2 - azurelinuxagent/distro/coreos/__init__.py | 18 - azurelinuxagent/distro/coreos/deprovision.py | 33 - azurelinuxagent/distro/coreos/distro.py | 29 - azurelinuxagent/distro/coreos/osutil.py | 95 -- azurelinuxagent/distro/debian/__init__.py | 19 - azurelinuxagent/distro/debian/distro.py | 27 - azurelinuxagent/distro/debian/loader.py | 24 - azurelinuxagent/distro/debian/osutil.py | 47 - azurelinuxagent/distro/default/__init__.py | 19 - azurelinuxagent/distro/default/daemon.py | 103 -- azurelinuxagent/distro/default/deprovision.py | 125 --- azurelinuxagent/distro/default/dhcp.py | 318 ------ azurelinuxagent/distro/default/distro.py | 51 - azurelinuxagent/distro/default/env.py | 104 -- azurelinuxagent/distro/default/extension.py | 817 --------------- azurelinuxagent/distro/default/init.py | 53 - 
azurelinuxagent/distro/default/monitor.py | 182 ---- azurelinuxagent/distro/default/osutil.py | 623 ----------- azurelinuxagent/distro/default/protocolUtil.py | 243 ----- azurelinuxagent/distro/default/provision.py | 191 ---- azurelinuxagent/distro/default/resourceDisk.py | 167 --- azurelinuxagent/distro/default/scvmm.py | 48 - azurelinuxagent/distro/loader.py | 67 -- azurelinuxagent/distro/redhat/__init__.py | 19 - azurelinuxagent/distro/redhat/distro.py | 32 - azurelinuxagent/distro/redhat/osutil.py | 115 -- azurelinuxagent/distro/suse/__init__.py | 2 - azurelinuxagent/distro/suse/distro.py | 32 - azurelinuxagent/distro/suse/osutil.py | 108 -- azurelinuxagent/distro/ubuntu/__init__.py | 19 - azurelinuxagent/distro/ubuntu/deprovision.py | 46 - azurelinuxagent/distro/ubuntu/distro.py | 55 - azurelinuxagent/distro/ubuntu/osutil.py | 75 -- azurelinuxagent/distro/ubuntu/provision.py | 98 -- azurelinuxagent/event.py | 124 --- azurelinuxagent/exception.py | 115 -- azurelinuxagent/future.py | 31 - azurelinuxagent/ga/__init__.py | 17 + azurelinuxagent/ga/env.py | 112 ++ azurelinuxagent/ga/exthandlers.py | 902 ++++++++++++++++ azurelinuxagent/ga/monitor.py | 192 ++++ azurelinuxagent/ga/update.py | 715 +++++++++++++ azurelinuxagent/logger.py | 161 --- azurelinuxagent/metadata.py | 82 -- azurelinuxagent/pa/__init__.py | 17 + azurelinuxagent/pa/deprovision/__init__.py | 20 + azurelinuxagent/pa/deprovision/coreos.py | 34 + azurelinuxagent/pa/deprovision/default.py | 131 +++ azurelinuxagent/pa/deprovision/factory.py | 36 + azurelinuxagent/pa/deprovision/ubuntu.py | 47 + azurelinuxagent/pa/provision/__init__.py | 18 + azurelinuxagent/pa/provision/default.py | 196 ++++ azurelinuxagent/pa/provision/factory.py | 32 + azurelinuxagent/pa/provision/ubuntu.py | 98 ++ azurelinuxagent/pa/rdma/__init__.py | 18 + azurelinuxagent/pa/rdma/centos.py | 203 ++++ azurelinuxagent/pa/rdma/factory.py | 41 + azurelinuxagent/pa/rdma/suse.py | 130 +++ azurelinuxagent/protocol/__init__.py | 18 - azurelinuxagent/protocol/metadata.py | 195 ---- azurelinuxagent/protocol/ovfenv.py | 113 -- azurelinuxagent/protocol/restapi.py | 250 ----- azurelinuxagent/protocol/wire.py | 1155 -------------------- azurelinuxagent/utils/__init__.py | 19 - azurelinuxagent/utils/cryptutil.py | 121 --- azurelinuxagent/utils/fileutil.py | 187 ---- azurelinuxagent/utils/restutil.py | 156 --- azurelinuxagent/utils/shellutil.py | 89 -- azurelinuxagent/utils/textutil.py | 236 ----- 112 files changed, 9893 insertions(+), 7247 deletions(-) create mode 100644 azurelinuxagent/common/__init__.py create mode 100644 azurelinuxagent/common/conf.py create mode 100644 azurelinuxagent/common/dhcp.py create mode 100644 azurelinuxagent/common/event.py create mode 100644 azurelinuxagent/common/exception.py create mode 100644 azurelinuxagent/common/future.py create mode 100644 azurelinuxagent/common/logger.py create mode 100644 azurelinuxagent/common/osutil/__init__.py create mode 100644 azurelinuxagent/common/osutil/coreos.py create mode 100644 azurelinuxagent/common/osutil/debian.py create mode 100644 azurelinuxagent/common/osutil/default.py create mode 100644 azurelinuxagent/common/osutil/factory.py create mode 100644 azurelinuxagent/common/osutil/freebsd.py create mode 100644 azurelinuxagent/common/osutil/redhat.py create mode 100644 azurelinuxagent/common/osutil/suse.py create mode 100644 azurelinuxagent/common/osutil/ubuntu.py create mode 100644 azurelinuxagent/common/protocol/__init__.py create mode 100644 azurelinuxagent/common/protocol/hostplugin.py create mode 
100644 azurelinuxagent/common/protocol/metadata.py create mode 100644 azurelinuxagent/common/protocol/ovfenv.py create mode 100644 azurelinuxagent/common/protocol/restapi.py create mode 100644 azurelinuxagent/common/protocol/util.py create mode 100644 azurelinuxagent/common/protocol/wire.py create mode 100644 azurelinuxagent/common/rdma.py create mode 100644 azurelinuxagent/common/utils/__init__.py create mode 100644 azurelinuxagent/common/utils/cryptutil.py create mode 100644 azurelinuxagent/common/utils/fileutil.py create mode 100644 azurelinuxagent/common/utils/flexible_version.py create mode 100644 azurelinuxagent/common/utils/restutil.py create mode 100644 azurelinuxagent/common/utils/shellutil.py create mode 100644 azurelinuxagent/common/utils/textutil.py create mode 100644 azurelinuxagent/common/version.py delete mode 100644 azurelinuxagent/conf.py create mode 100644 azurelinuxagent/daemon/__init__.py create mode 100644 azurelinuxagent/daemon/main.py create mode 100644 azurelinuxagent/daemon/resourcedisk/__init__.py create mode 100644 azurelinuxagent/daemon/resourcedisk/default.py create mode 100644 azurelinuxagent/daemon/resourcedisk/factory.py create mode 100644 azurelinuxagent/daemon/resourcedisk/freebsd.py create mode 100644 azurelinuxagent/daemon/scvmm.py delete mode 100644 azurelinuxagent/distro/coreos/__init__.py delete mode 100644 azurelinuxagent/distro/coreos/deprovision.py delete mode 100644 azurelinuxagent/distro/coreos/distro.py delete mode 100644 azurelinuxagent/distro/coreos/osutil.py delete mode 100644 azurelinuxagent/distro/debian/__init__.py delete mode 100644 azurelinuxagent/distro/debian/distro.py delete mode 100644 azurelinuxagent/distro/debian/loader.py delete mode 100644 azurelinuxagent/distro/debian/osutil.py delete mode 100644 azurelinuxagent/distro/default/__init__.py delete mode 100644 azurelinuxagent/distro/default/daemon.py delete mode 100644 azurelinuxagent/distro/default/deprovision.py delete mode 100644 azurelinuxagent/distro/default/dhcp.py delete mode 100644 azurelinuxagent/distro/default/distro.py delete mode 100644 azurelinuxagent/distro/default/env.py delete mode 100644 azurelinuxagent/distro/default/extension.py delete mode 100644 azurelinuxagent/distro/default/init.py delete mode 100644 azurelinuxagent/distro/default/monitor.py delete mode 100644 azurelinuxagent/distro/default/osutil.py delete mode 100644 azurelinuxagent/distro/default/protocolUtil.py delete mode 100644 azurelinuxagent/distro/default/provision.py delete mode 100644 azurelinuxagent/distro/default/resourceDisk.py delete mode 100644 azurelinuxagent/distro/default/scvmm.py delete mode 100644 azurelinuxagent/distro/loader.py delete mode 100644 azurelinuxagent/distro/redhat/__init__.py delete mode 100644 azurelinuxagent/distro/redhat/distro.py delete mode 100644 azurelinuxagent/distro/redhat/osutil.py delete mode 100644 azurelinuxagent/distro/suse/distro.py delete mode 100644 azurelinuxagent/distro/suse/osutil.py delete mode 100644 azurelinuxagent/distro/ubuntu/__init__.py delete mode 100644 azurelinuxagent/distro/ubuntu/deprovision.py delete mode 100644 azurelinuxagent/distro/ubuntu/distro.py delete mode 100644 azurelinuxagent/distro/ubuntu/osutil.py delete mode 100644 azurelinuxagent/distro/ubuntu/provision.py delete mode 100644 azurelinuxagent/event.py delete mode 100644 azurelinuxagent/exception.py delete mode 100644 azurelinuxagent/future.py create mode 100644 azurelinuxagent/ga/__init__.py create mode 100644 azurelinuxagent/ga/env.py create mode 100644 
azurelinuxagent/ga/exthandlers.py create mode 100644 azurelinuxagent/ga/monitor.py create mode 100644 azurelinuxagent/ga/update.py delete mode 100644 azurelinuxagent/logger.py delete mode 100644 azurelinuxagent/metadata.py create mode 100644 azurelinuxagent/pa/__init__.py create mode 100644 azurelinuxagent/pa/deprovision/__init__.py create mode 100644 azurelinuxagent/pa/deprovision/coreos.py create mode 100644 azurelinuxagent/pa/deprovision/default.py create mode 100644 azurelinuxagent/pa/deprovision/factory.py create mode 100644 azurelinuxagent/pa/deprovision/ubuntu.py create mode 100644 azurelinuxagent/pa/provision/__init__.py create mode 100644 azurelinuxagent/pa/provision/default.py create mode 100644 azurelinuxagent/pa/provision/factory.py create mode 100644 azurelinuxagent/pa/provision/ubuntu.py create mode 100644 azurelinuxagent/pa/rdma/__init__.py create mode 100644 azurelinuxagent/pa/rdma/centos.py create mode 100644 azurelinuxagent/pa/rdma/factory.py create mode 100644 azurelinuxagent/pa/rdma/suse.py delete mode 100644 azurelinuxagent/protocol/__init__.py delete mode 100644 azurelinuxagent/protocol/metadata.py delete mode 100644 azurelinuxagent/protocol/ovfenv.py delete mode 100644 azurelinuxagent/protocol/restapi.py delete mode 100644 azurelinuxagent/protocol/wire.py delete mode 100644 azurelinuxagent/utils/__init__.py delete mode 100644 azurelinuxagent/utils/cryptutil.py delete mode 100644 azurelinuxagent/utils/fileutil.py delete mode 100644 azurelinuxagent/utils/restutil.py delete mode 100644 azurelinuxagent/utils/shellutil.py delete mode 100644 azurelinuxagent/utils/textutil.py (limited to 'azurelinuxagent') diff --git a/azurelinuxagent/__init__.py b/azurelinuxagent/__init__.py index 1ea2f38..2ef4c16 100644 --- a/azurelinuxagent/__init__.py +++ b/azurelinuxagent/__init__.py @@ -14,4 +14,3 @@ # # Requires Python 2.4+ and Openssl 1.0+ # - diff --git a/azurelinuxagent/agent.py b/azurelinuxagent/agent.py index 93e9c16..1309d94 100644 --- a/azurelinuxagent/agent.py +++ b/azurelinuxagent/agent.py @@ -25,48 +25,91 @@ import os import sys import re import subprocess -from azurelinuxagent.metadata import AGENT_NAME, AGENT_LONG_VERSION, \ +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.event as event +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.version import AGENT_NAME, AGENT_LONG_VERSION, \ DISTRO_NAME, DISTRO_VERSION, \ PY_VERSION_MAJOR, PY_VERSION_MINOR, \ PY_VERSION_MICRO - -from azurelinuxagent.distro.loader import get_distro +from azurelinuxagent.common.osutil import get_osutil class Agent(object): def __init__(self, verbose): """ Initialize agent running environment. 
""" - self.distro = get_distro(); - self.distro.init_handler.run(verbose) + self.osutil = get_osutil() + #Init stdout log + level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO + logger.add_logger_appender(logger.AppenderType.STDOUT, level) + + #Init config + conf_file_path = self.osutil.get_agent_conf_file_path() + conf.load_conf_from_file(conf_file_path) + + #Init log + verbose = verbose or conf.get_logs_verbose() + level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO + logger.add_logger_appender(logger.AppenderType.FILE, level, + path="/var/log/waagent.log") + logger.add_logger_appender(logger.AppenderType.CONSOLE, level, + path="/dev/console") + + #Init event reporter + event_dir = os.path.join(conf.get_lib_dir(), "events") + event.init_event_logger(event_dir) + event.enable_unhandled_err_dump("WALA") def daemon(self): """ Run agent daemon """ - self.distro.daemon_handler.run() + from azurelinuxagent.daemon import get_daemon_handler + daemon_handler = get_daemon_handler() + daemon_handler.run() + + def provision(self): + """ + Run provision command + """ + from azurelinuxagent.pa.provision import get_provision_handler + provision_handler = get_provision_handler() + provision_handler.run() def deprovision(self, force=False, deluser=False): """ Run deprovision command """ - self.distro.deprovision_handler.run(force=force, deluser=deluser) + from azurelinuxagent.pa.deprovision import get_deprovision_handler + deprovision_handler = get_deprovision_handler() + deprovision_handler.run(force=force, deluser=deluser) def register_service(self): """ Register agent as a service """ print("Register {0} service".format(AGENT_NAME)) - self.distro.osutil.register_agent_service() + self.osutil.register_agent_service() print("Start {0} service".format(AGENT_NAME)) - self.distro.osutil.start_agent_service() + self.osutil.start_agent_service() + + def run_exthandlers(self): + """ + Run the update and extension handler + """ + from azurelinuxagent.ga.update import get_update_handler + update_handler = get_update_handler() + update_handler.run() -def main(): +def main(args=[]): """ Parse command line arguments, exit with usage() on error. 
Invoke different methods according to different command """ - command, force, verbose = parse_args(sys.argv[1:]) + if len(args) <= 0: + args = sys.argv[1:] + command, force, verbose = parse_args(args) if command == "version": version() elif command == "help": @@ -74,15 +117,22 @@ def main(): elif command == "start": start() else: - agent = Agent(verbose) - if command == "deprovision+user": - agent.deprovision(force, deluser=True) - elif command == "deprovision": - agent.deprovision(force, deluser=False) - elif command == "register-service": - agent.register_service() - elif command == "daemon": - agent.daemon() + try: + agent = Agent(verbose) + if command == "deprovision+user": + agent.deprovision(force, deluser=True) + elif command == "deprovision": + agent.deprovision(force, deluser=False) + elif command == "provision": + agent.provision() + elif command == "register-service": + agent.register_service() + elif command == "daemon": + agent.daemon() + elif command == "run-exthandlers": + agent.run_exthandlers() + except Exception as e: + logger.error(u"Failed to run '{0}': {1}", command, e) def parse_args(sys_args): """ @@ -102,6 +152,8 @@ def parse_args(sys_args): cmd = "start" elif re.match("^([-/]*)register-service", a): cmd = "register-service" + elif re.match("^([-/]*)run-exthandlers", a): + cmd = "run-exthandlers" elif re.match("^([-/]*)version", a): cmd = "version" elif re.match("^([-/]*)verbose", a): @@ -128,8 +180,9 @@ def usage(): Show agent usage """ print("") - print((("usage: {0} [-verbose] [-force] [-help]" - "-deprovision[+user]|-register-service|-version|-daemon|-start]" + print((("usage: {0} [-verbose] [-force] [-help] " + "-deprovision[+user]|-register-service|-version|-daemon|-start|" + "-run-exthandlers]" "").format(sys.argv[0]))) print("") @@ -141,3 +194,5 @@ def start(): devnull = open(os.devnull, 'w') subprocess.Popen([sys.argv[0], '-daemon'], stdout=devnull, stderr=devnull) +if __name__ == '__main__' : + main() diff --git a/azurelinuxagent/common/__init__.py b/azurelinuxagent/common/__init__.py new file mode 100644 index 0000000..1ea2f38 --- /dev/null +++ b/azurelinuxagent/common/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + diff --git a/azurelinuxagent/common/conf.py b/azurelinuxagent/common/conf.py new file mode 100644 index 0000000..1a3b0da --- /dev/null +++ b/azurelinuxagent/common/conf.py @@ -0,0 +1,181 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
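The command-line handling above is prefix-based: each command is matched with a regex that tolerates any run of leading "-" or "/" characters, so "-daemon", "--daemon" and "/daemon" all select the daemon command. A minimal standalone sketch of that matching style (simplified; the names here are illustrative, and the real parse_args also tracks -force and -verbose):

import re

KNOWN_COMMANDS = ("deprovision+user", "deprovision", "daemon", "start",
                  "register-service", "run-exthandlers", "version", "help")

def match_command(arg):
    # Tolerate "-cmd", "--cmd" or "/cmd" spellings, as the agent does.
    for cmd in KNOWN_COMMANDS:
        if re.match("^([-/]*)" + re.escape(cmd) + "$", arg):
            return cmd
    return "help"

assert match_command("--daemon") == "daemon"
assert match_command("/version") == "version"
assert match_command("-deprovision+user") == "deprovision+user"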
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +""" +Module conf loads and parses configuration file +""" +import os +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.exception import AgentConfigError + +class ConfigurationProvider(object): + """ + Parse amd store key:values in /etc/waagent.conf. + """ + def __init__(self): + self.values = dict() + + def load(self, content): + if not content: + raise AgentConfigError("Can't not parse empty configuration") + for line in content.split('\n'): + if not line.startswith("#") and "=" in line: + parts = line.split()[0].split('=') + value = parts[1].strip("\" ") + if value != "None": + self.values[parts[0]] = value + else: + self.values[parts[0]] = None + + def get(self, key, default_val): + val = self.values.get(key) + return val if val is not None else default_val + + def get_switch(self, key, default_val): + val = self.values.get(key) + if val is not None and val.lower() == 'y': + return True + elif val is not None and val.lower() == 'n': + return False + return default_val + + def get_int(self, key, default_val): + try: + return int(self.values.get(key)) + except TypeError: + return default_val + except ValueError: + return default_val + + +__conf__ = ConfigurationProvider() + +def load_conf_from_file(conf_file_path, conf=__conf__): + """ + Load conf file from: conf_file_path + """ + if os.path.isfile(conf_file_path) == False: + raise AgentConfigError(("Missing configuration in {0}" + "").format(conf_file_path)) + try: + content = fileutil.read_file(conf_file_path) + conf.load(content) + except IOError as err: + raise AgentConfigError(("Failed to load conf file:{0}, {1}" + "").format(conf_file_path, err)) + +def enable_rdma(conf=__conf__): + return conf.get_switch("OS.EnableRDMA", False) + +def get_logs_verbose(conf=__conf__): + return conf.get_switch("Logs.Verbose", False) + +def get_lib_dir(conf=__conf__): + return conf.get("Lib.Dir", "/var/lib/waagent") + +def get_dvd_mount_point(conf=__conf__): + return conf.get("DVD.MountPoint", "/mnt/cdrom/secure") + +def get_agent_pid_file_path(conf=__conf__): + return conf.get("Pid.File", "/var/run/waagent.pid") + +def get_ext_log_dir(conf=__conf__): + return conf.get("Extension.LogDir", "/var/log/azure") + +def get_openssl_cmd(conf=__conf__): + return conf.get("OS.OpensslPath", "/usr/bin/openssl") + +def get_home_dir(conf=__conf__): + return conf.get("OS.HomeDir", "/home") + +def get_passwd_file_path(conf=__conf__): + return conf.get("OS.PasswordPath", "/etc/shadow") + +def get_sudoers_dir(conf=__conf__): + return conf.get("OS.SudoersDir", "/etc/sudoers.d") + +def get_sshd_conf_file_path(conf=__conf__): + return conf.get("OS.SshdConfigPath", "/etc/ssh/sshd_config") + +def get_root_device_scsi_timeout(conf=__conf__): + return conf.get("OS.RootDeviceScsiTimeout", None) + +def get_ssh_host_keypair_type(conf=__conf__): + return conf.get("Provisioning.SshHostKeyPairType", "rsa") + +def get_provision_enabled(conf=__conf__): + return conf.get_switch("Provisioning.Enabled", True) + +def get_allow_reset_sys_user(conf=__conf__): + return conf.get_switch("Provisioning.AllowResetSysUser", False) + +def get_regenerate_ssh_host_key(conf=__conf__): + return conf.get_switch("Provisioning.RegenerateSshHostKeyPair", False) + +def get_delete_root_password(conf=__conf__): + return conf.get_switch("Provisioning.DeleteRootPassword", False) + +def 
get_decode_customdata(conf=__conf__): + return conf.get_switch("Provisioning.DecodeCustomData", False) + +def get_execute_customdata(conf=__conf__): + return conf.get_switch("Provisioning.ExecuteCustomData", False) + +def get_password_cryptid(conf=__conf__): + return conf.get("Provisioning.PasswordCryptId", "6") + +def get_password_crypt_salt_len(conf=__conf__): + return conf.get_int("Provisioning.PasswordCryptSaltLength", 10) + +def get_monitor_hostname(conf=__conf__): + return conf.get_switch("Provisioning.MonitorHostName", False) + +def get_httpproxy_host(conf=__conf__): + return conf.get("HttpProxy.Host", None) + +def get_httpproxy_port(conf=__conf__): + return conf.get("HttpProxy.Port", None) + +def get_detect_scvmm_env(conf=__conf__): + return conf.get_switch("DetectScvmmEnv", False) + +def get_resourcedisk_format(conf=__conf__): + return conf.get_switch("ResourceDisk.Format", False) + +def get_resourcedisk_enable_swap(conf=__conf__): + return conf.get_switch("ResourceDisk.EnableSwap", False) + +def get_resourcedisk_mountpoint(conf=__conf__): + return conf.get("ResourceDisk.MountPoint", "/mnt/resource") + +def get_resourcedisk_filesystem(conf=__conf__): + return conf.get("ResourceDisk.Filesystem", "ext3") + +def get_resourcedisk_swap_size_mb(conf=__conf__): + return conf.get_int("ResourceDisk.SwapSizeMB", 0) + +def get_autoupdate_gafamily(conf=__conf__): + return conf.get("AutoUpdate.GAFamily", "Prod") + +def get_autoupdate_enabled(conf=__conf__): + return conf.get_switch("AutoUpdate.Enabled", True) + +def get_autoupdate_frequency(conf=__conf__): + return conf.get_int("Autoupdate.Frequency", 3600) + diff --git a/azurelinuxagent/common/dhcp.py b/azurelinuxagent/common/dhcp.py new file mode 100644 index 0000000..d5c90cb --- /dev/null +++ b/azurelinuxagent/common/dhcp.py @@ -0,0 +1,400 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ + +import os +import socket +import array +import time +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.utils import fileutil +from azurelinuxagent.common.utils.textutil import hex_dump, hex_dump2, \ + hex_dump3, \ + compare_bytes, str_to_ord, \ + unpack_big_endian, \ + int_to_ip4_addr +from azurelinuxagent.common.exception import DhcpError +from azurelinuxagent.common.osutil import get_osutil + +# the kernel routing table representation of 168.63.129.16 +KNOWN_WIRESERVER_IP_ENTRY = '10813FA8' +KNOWN_WIRESERVER_IP = '168.63.129.16' + + +def get_dhcp_handler(): + return DhcpHandler() + + +class DhcpHandler(object): + """ + Azure use DHCP option 245 to pass endpoint ip to VMs. 
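The configuration module above stores /etc/waagent.conf as a flat key/value map: get_switch() treats "y"/"n" as booleans, get_int() falls back to the default on unparsable values, and a literal value of "None" is stored as None so the built-in default wins. A short sketch of that behaviour, assuming the azurelinuxagent.common.conf module shown above is importable:

from azurelinuxagent.common.conf import ConfigurationProvider

sample = "\n".join([
    "# waagent.conf fragment",
    "Provisioning.Enabled=y",
    "Logs.Verbose=n",
    "ResourceDisk.SwapSizeMB=2048",
    "OS.OpensslPath=None",
])

cp = ConfigurationProvider()
cp.load(sample)
assert cp.get_switch("Provisioning.Enabled", False)        # 'y' -> True
assert not cp.get_switch("Logs.Verbose", True)              # 'n' -> False
assert cp.get_int("ResourceDisk.SwapSizeMB", 0) == 2048
assert cp.get("OS.OpensslPath", "/usr/bin/openssl") == "/usr/bin/openssl"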
+ """ + + def __init__(self): + self.osutil = get_osutil() + self.endpoint = None + self.gateway = None + self.routes = None + self._request_broadcast = False + self.skip_cache = False + + def run(self): + """ + Send dhcp request + Configure default gateway and routes + Save wire server endpoint if found + """ + if self.wireserver_route_exists or self.dhcp_cache_exists: + return + + self.send_dhcp_req() + self.conf_routes() + + def wait_for_network(self): + """ + Wait for network stack to be initialized. + """ + ipv4 = self.osutil.get_ip4_addr() + while ipv4 == '' or ipv4 == '0.0.0.0': + logger.info("Waiting for network.") + time.sleep(10) + logger.info("Try to start network interface.") + self.osutil.start_network() + ipv4 = self.osutil.get_ip4_addr() + + @property + def wireserver_route_exists(self): + """ + Determine whether a route to the known wireserver + ip already exists, and if so use that as the endpoint. + This is true when running in a virtual network. + :return: True if a route to KNOWN_WIRESERVER_IP exists. + """ + route_exists = False + logger.info("test for route to {0}".format(KNOWN_WIRESERVER_IP)) + try: + route_file = '/proc/net/route' + if os.path.exists(route_file) and \ + KNOWN_WIRESERVER_IP_ENTRY in open(route_file).read(): + # reset self.gateway and self.routes + # we do not need to alter the routing table + self.endpoint = KNOWN_WIRESERVER_IP + self.gateway = None + self.routes = None + route_exists = True + logger.info("route to {0} exists".format(KNOWN_WIRESERVER_IP)) + else: + logger.warn( + "no route exists to {0}".format(KNOWN_WIRESERVER_IP)) + except Exception as e: + logger.error( + "could not determine whether route exists to {0}: {1}".format( + KNOWN_WIRESERVER_IP, e)) + + return route_exists + + @property + def dhcp_cache_exists(self): + """ + Check whether the dhcp options cache exists and contains the + wireserver endpoint, unless skip_cache is True. + :return: True if the cached endpoint was found in the dhcp lease + """ + if self.skip_cache: + return False + + exists = False + + logger.info("checking for dhcp lease cache") + cached_endpoint = self.osutil.get_dhcp_lease_endpoint() + if cached_endpoint is not None: + self.endpoint = cached_endpoint + exists = True + logger.info("cache exists [{0}]".format(exists)) + return exists + + def conf_routes(self): + logger.info("Configure routes") + logger.info("Gateway:{0}", self.gateway) + logger.info("Routes:{0}", self.routes) + # Add default gateway + if self.gateway is not None: + self.osutil.route_add(0, 0, self.gateway) + if self.routes is not None: + for route in self.routes: + self.osutil.route_add(route[0], route[1], route[2]) + + def _send_dhcp_req(self, request): + __waiting_duration__ = [0, 10, 30, 60, 60] + for duration in __waiting_duration__: + try: + self.osutil.allow_dhcp_broadcast() + response = socket_send(request) + validate_dhcp_resp(request, response) + return response + except DhcpError as e: + logger.warn("Failed to send DHCP request: {0}", e) + time.sleep(duration) + return None + + def send_dhcp_req(self): + """ + Build dhcp request with mac addr + Configure route to allow dhcp traffic + Stop dhcp service if necessary + """ + logger.info("Send dhcp request") + mac_addr = self.osutil.get_mac_addr() + + # Do unicast first, then fallback to broadcast if fails. + req = build_dhcp_request(mac_addr, self._request_broadcast) + if not self._request_broadcast: + self._request_broadcast = True + + # Temporary allow broadcast for dhcp. Remove the route when done. 
+ missing_default_route = self.osutil.is_missing_default_route() + ifname = self.osutil.get_if_name() + if missing_default_route: + self.osutil.set_route_for_dhcp_broadcast(ifname) + + # In some distros, dhcp service needs to be shutdown before agent probe + # endpoint through dhcp. + if self.osutil.is_dhcp_enabled(): + self.osutil.stop_dhcp_service() + + resp = self._send_dhcp_req(req) + + if self.osutil.is_dhcp_enabled(): + self.osutil.start_dhcp_service() + + if missing_default_route: + self.osutil.remove_route_for_dhcp_broadcast(ifname) + + if resp is None: + raise DhcpError("Failed to receive dhcp response.") + self.endpoint, self.gateway, self.routes = parse_dhcp_resp(resp) + + +def validate_dhcp_resp(request, response): + bytes_recv = len(response) + if bytes_recv < 0xF6: + logger.error("HandleDhcpResponse: Too few bytes received:{0}", + bytes_recv) + return False + + logger.verbose("BytesReceived:{0}", hex(bytes_recv)) + logger.verbose("DHCP response:{0}", hex_dump(response, bytes_recv)) + + # check transactionId, cookie, MAC address cookie should never mismatch + # transactionId and MAC address may mismatch if we see a response + # meant from another machine + if not compare_bytes(request, response, 0xEC, 4): + logger.verbose("Cookie not match:\nsend={0},\nreceive={1}", + hex_dump3(request, 0xEC, 4), + hex_dump3(response, 0xEC, 4)) + raise DhcpError("Cookie in dhcp respones doesn't match the request") + + if not compare_bytes(request, response, 4, 4): + logger.verbose("TransactionID not match:\nsend={0},\nreceive={1}", + hex_dump3(request, 4, 4), + hex_dump3(response, 4, 4)) + raise DhcpError("TransactionID in dhcp respones " + "doesn't match the request") + + if not compare_bytes(request, response, 0x1C, 6): + logger.verbose("Mac Address not match:\nsend={0},\nreceive={1}", + hex_dump3(request, 0x1C, 6), + hex_dump3(response, 0x1C, 6)) + raise DhcpError("Mac Addr in dhcp respones " + "doesn't match the request") + + +def parse_route(response, option, i, length, bytes_recv): + # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx + logger.verbose("Routes at offset: {0} with length:{1}", hex(i), + hex(length)) + routes = [] + if length < 5: + logger.error("Data too small for option:{0}", option) + j = i + 2 + while j < (i + length + 2): + mask_len_bits = str_to_ord(response[j]) + mask_len_bytes = (((mask_len_bits + 7) & ~7) >> 3) + mask = 0xFFFFFFFF & (0xFFFFFFFF << (32 - mask_len_bits)) + j += 1 + net = unpack_big_endian(response, j, mask_len_bytes) + net <<= (32 - mask_len_bytes * 8) + net &= mask + j += mask_len_bytes + gateway = unpack_big_endian(response, j, 4) + j += 4 + routes.append((net, mask, gateway)) + if j != (i + length + 2): + logger.error("Unable to parse routes") + return routes + + +def parse_ip_addr(response, option, i, length, bytes_recv): + if i + 5 < bytes_recv: + if length != 4: + logger.error("Endpoint or Default Gateway not 4 bytes") + return None + addr = unpack_big_endian(response, i + 2, 4) + ip_addr = int_to_ip4_addr(addr) + return ip_addr + else: + logger.error("Data too small for option:{0}", option) + return None + + +def parse_dhcp_resp(response): + """ + Parse DHCP response: + Returns endpoint server or None on error. + """ + logger.verbose("parse Dhcp Response") + bytes_recv = len(response) + endpoint = None + gateway = None + routes = None + + # Walk all the returned options, parsing out what we need, ignoring the + # others. 
We need the custom option 245 to find the the endpoint we talk to + # as well as to handle some Linux DHCP client incompatibilities; + # options 3 for default gateway and 249 for routes; 255 is end. + + i = 0xF0 # offset to first option + while i < bytes_recv: + option = str_to_ord(response[i]) + length = 0 + if (i + 1) < bytes_recv: + length = str_to_ord(response[i + 1]) + logger.verbose("DHCP option {0} at offset:{1} with length:{2}", + hex(option), hex(i), hex(length)) + if option == 255: + logger.verbose("DHCP packet ended at offset:{0}", hex(i)) + break + elif option == 249: + routes = parse_route(response, option, i, length, bytes_recv) + elif option == 3: + gateway = parse_ip_addr(response, option, i, length, bytes_recv) + logger.verbose("Default gateway:{0}, at {1}", gateway, hex(i)) + elif option == 245: + endpoint = parse_ip_addr(response, option, i, length, bytes_recv) + logger.verbose("Azure wire protocol endpoint:{0}, at {1}", + endpoint, + hex(i)) + else: + logger.verbose("Skipping DHCP option:{0} at {1} with length {2}", + hex(option), hex(i), hex(length)) + i += length + 2 + return endpoint, gateway, routes + + +def socket_send(request): + sock = None + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, + socket.IPPROTO_UDP) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + sock.bind(("0.0.0.0", 68)) + sock.sendto(request, ("", 67)) + sock.settimeout(10) + logger.verbose("Send DHCP request: Setting socket.timeout=10, " + "entering recv") + response = sock.recv(1024) + return response + except IOError as e: + raise DhcpError("{0}".format(e)) + finally: + if sock is not None: + sock.close() + + +def build_dhcp_request(mac_addr, request_broadcast): + """ + Build DHCP request string. 
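parse_dhcp_resp above walks the reply's option area as a plain type/length/value sequence starting at offset 0xF0, keeping option 3 (default gateway), 249 (routes) and 245 (wire-server endpoint) and stopping at 255. A tiny standalone sketch of that walk over a fabricated option buffer (not a full DHCP packet):

def walk_options(options):
    # options: byte values laid out as [code, length, value..., 255]
    found = {}
    i = 0
    while i < len(options):
        code = options[i]
        if code == 255:                      # end-of-options marker
            break
        length = options[i + 1]
        found[code] = options[i + 2:i + 2 + length]
        i += length + 2
    return found

# Option 245 carrying 168.63.129.16, followed by the end marker.
buf = [245, 4, 168, 63, 129, 16, 255]
assert walk_options(buf)[245] == [168, 63, 129, 16]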
+ """ + # + # typedef struct _DHCP { + # UINT8 Opcode; /* op: BOOTREQUEST or BOOTREPLY */ + # UINT8 HardwareAddressType; /* htype: ethernet */ + # UINT8 HardwareAddressLength; /* hlen: 6 (48 bit mac address) */ + # UINT8 Hops; /* hops: 0 */ + # UINT8 TransactionID[4]; /* xid: random */ + # UINT8 Seconds[2]; /* secs: 0 */ + # UINT8 Flags[2]; /* flags: 0 or 0x8000 for broadcast*/ + # UINT8 ClientIpAddress[4]; /* ciaddr: 0 */ + # UINT8 YourIpAddress[4]; /* yiaddr: 0 */ + # UINT8 ServerIpAddress[4]; /* siaddr: 0 */ + # UINT8 RelayAgentIpAddress[4]; /* giaddr: 0 */ + # UINT8 ClientHardwareAddress[16]; /* chaddr: 6 byte eth MAC address */ + # UINT8 ServerName[64]; /* sname: 0 */ + # UINT8 BootFileName[128]; /* file: 0 */ + # UINT8 MagicCookie[4]; /* 99 130 83 99 */ + # /* 0x63 0x82 0x53 0x63 */ + # /* options -- hard code ours */ + # + # UINT8 MessageTypeCode; /* 53 */ + # UINT8 MessageTypeLength; /* 1 */ + # UINT8 MessageType; /* 1 for DISCOVER */ + # UINT8 End; /* 255 */ + # } DHCP; + # + + # tuple of 244 zeros + # (struct.pack_into would be good here, but requires Python 2.5) + request = [0] * 244 + + trans_id = gen_trans_id() + + # Opcode = 1 + # HardwareAddressType = 1 (ethernet/MAC) + # HardwareAddressLength = 6 (ethernet/MAC/48 bits) + for a in range(0, 3): + request[a] = [1, 1, 6][a] + + # fill in transaction id (random number to ensure response matches request) + for a in range(0, 4): + request[4 + a] = str_to_ord(trans_id[a]) + + logger.verbose("BuildDhcpRequest: transactionId:%s,%04X" % ( + hex_dump2(trans_id), + unpack_big_endian(request, 4, 4))) + + if request_broadcast: + # set broadcast flag to true to request the dhcp sever + # to respond to a boradcast address, + # this is useful when user dhclient fails. + request[0x0A] = 0x80; + + # fill in ClientHardwareAddress + for a in range(0, 6): + request[0x1C + a] = str_to_ord(mac_addr[a]) + + # DHCP Magic Cookie: 99, 130, 83, 99 + # MessageTypeCode = 53 DHCP Message Type + # MessageTypeLength = 1 + # MessageType = DHCPDISCOVER + # End = 255 DHCP_END + for a in range(0, 8): + request[0xEC + a] = [99, 130, 83, 99, 53, 1, 1, 255][a] + return array.array("B", request) + + +def gen_trans_id(): + return os.urandom(4) diff --git a/azurelinuxagent/common/event.py b/azurelinuxagent/common/event.py new file mode 100644 index 0000000..374b0e7 --- /dev/null +++ b/azurelinuxagent/common/event.py @@ -0,0 +1,124 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import sys +import traceback +import atexit +import json +import time +import datetime +import threading +import platform +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import EventError, ProtocolError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \ + TelemetryEventList, \ + TelemetryEvent, \ + set_properties, get_properties +from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ + DISTRO_CODE_NAME, AGENT_VERSION + + +class WALAEventOperation: + HeartBeat="HeartBeat" + Provision = "Provision" + Install = "Install" + UnInstall = "UnInstall" + Disable = "Disable" + Enable = "Enable" + Download = "Download" + Upgrade = "Upgrade" + Update = "Update" + ActivateResourceDisk="ActivateResourceDisk" + UnhandledError="UnhandledError" + +class EventLogger(object): + def __init__(self): + self.event_dir = None + + def save_event(self, data): + if self.event_dir is None: + logger.warn("Event reporter is not initialized.") + return + + if not os.path.exists(self.event_dir): + os.mkdir(self.event_dir) + os.chmod(self.event_dir, 0o700) + if len(os.listdir(self.event_dir)) > 1000: + raise EventError("Too many files under: {0}".format(self.event_dir)) + + filename = os.path.join(self.event_dir, ustr(int(time.time()*1000000))) + try: + with open(filename+".tmp",'wb+') as hfile: + hfile.write(data.encode("utf-8")) + os.rename(filename+".tmp", filename+".tld") + except IOError as e: + raise EventError("Failed to write events to file:{0}", e) + + def add_event(self, name, op="", is_success=True, duration=0, version=AGENT_VERSION, + message="", evt_type="", is_internal=False): + event = TelemetryEvent(1, "69B669B9-4AF8-4C50-BDC4-6006FA76E975") + event.parameters.append(TelemetryEventParam('Name', name)) + event.parameters.append(TelemetryEventParam('Version', str(version))) + event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) + event.parameters.append(TelemetryEventParam('Operation', op)) + event.parameters.append(TelemetryEventParam('OperationSuccess', + is_success)) + event.parameters.append(TelemetryEventParam('Message', message)) + event.parameters.append(TelemetryEventParam('Duration', duration)) + event.parameters.append(TelemetryEventParam('ExtensionType', evt_type)) + + data = get_properties(event) + try: + self.save_event(json.dumps(data)) + except EventError as e: + logger.error("{0}", e) + +__event_logger__ = EventLogger() + +def add_event(name, op="", is_success=True, duration=0, version=AGENT_VERSION, + message="", evt_type="", is_internal=False, + reporter=__event_logger__): + log = logger.info if is_success else logger.error + log("Event: name={0}, op={1}, message={2}", name, op, message) + + if reporter.event_dir is None: + logger.warn("Event reporter is not initialized.") + return + reporter.add_event(name, op=op, is_success=is_success, duration=duration, + version=str(version), message=message, evt_type=evt_type, + is_internal=is_internal) + +def init_event_logger(event_dir, reporter=__event_logger__): + reporter.event_dir = event_dir + +def dump_unhandled_err(name): + if hasattr(sys, 'last_type') and hasattr(sys, 'last_value') and \ + hasattr(sys, 'last_traceback'): + last_type = getattr(sys, 'last_type') + last_value = getattr(sys, 'last_value') + last_traceback = getattr(sys, 'last_traceback') + error = traceback.format_exception(last_type, last_value, + last_traceback) + 
message= "".join(error) + add_event(name, is_success=False, message=message, + op=WALAEventOperation.UnhandledError) + +def enable_unhandled_err_dump(name): + atexit.register(dump_unhandled_err, name) diff --git a/azurelinuxagent/common/exception.py b/azurelinuxagent/common/exception.py new file mode 100644 index 0000000..457490c --- /dev/null +++ b/azurelinuxagent/common/exception.py @@ -0,0 +1,123 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# +""" +Defines all exceptions +""" + +class AgentError(Exception): + """ + Base class of agent error. + """ + def __init__(self, errno, msg, inner=None): + msg = u"({0}){1}".format(errno, msg) + if inner is not None: + msg = u"{0} \n inner error: {1}".format(msg, inner) + super(AgentError, self).__init__(msg) + +class AgentConfigError(AgentError): + """ + When configure file is not found or malformed. + """ + def __init__(self, msg=None, inner=None): + super(AgentConfigError, self).__init__('000001', msg, inner) + +class AgentNetworkError(AgentError): + """ + When network is not avaiable. + """ + def __init__(self, msg=None, inner=None): + super(AgentNetworkError, self).__init__('000002', msg, inner) + +class ExtensionError(AgentError): + """ + When failed to execute an extension + """ + def __init__(self, msg=None, inner=None): + super(ExtensionError, self).__init__('000003', msg, inner) + +class ProvisionError(AgentError): + """ + When provision failed + """ + def __init__(self, msg=None, inner=None): + super(ProvisionError, self).__init__('000004', msg, inner) + +class ResourceDiskError(AgentError): + """ + Mount resource disk failed + """ + def __init__(self, msg=None, inner=None): + super(ResourceDiskError, self).__init__('000005', msg, inner) + +class DhcpError(AgentError): + """ + Failed to handle dhcp response + """ + def __init__(self, msg=None, inner=None): + super(DhcpError, self).__init__('000006', msg, inner) + +class OSUtilError(AgentError): + """ + Failed to perform operation to OS configuration + """ + def __init__(self, msg=None, inner=None): + super(OSUtilError, self).__init__('000007', msg, inner) + +class ProtocolError(AgentError): + """ + Azure protocol error + """ + def __init__(self, msg=None, inner=None): + super(ProtocolError, self).__init__('000008', msg, inner) + +class ProtocolNotFoundError(ProtocolError): + """ + Azure protocol endpoint not found + """ + def __init__(self, msg=None, inner=None): + super(ProtocolNotFoundError, self).__init__(msg, inner) + +class HttpError(AgentError): + """ + Http request failure + """ + def __init__(self, msg=None, inner=None): + super(HttpError, self).__init__('000009', msg, inner) + +class EventError(AgentError): + """ + Event reporting error + """ + def __init__(self, msg=None, inner=None): + super(EventError, self).__init__('000010', msg, inner) + +class CryptError(AgentError): + """ + Encrypt/Decrypt error + """ + def __init__(self, msg=None, inner=None): + 
super(CryptError, self).__init__('000011', msg, inner) + +class UpdateError(AgentError): + """ + Update Guest Agent error + """ + def __init__(self, msg=None, inner=None): + super(UpdateError, self).__init__('000012', msg, inner) + diff --git a/azurelinuxagent/common/future.py b/azurelinuxagent/common/future.py new file mode 100644 index 0000000..8509732 --- /dev/null +++ b/azurelinuxagent/common/future.py @@ -0,0 +1,31 @@ +import sys + +""" +Add alies for python2 and python3 libs and fucntions. +""" + +if sys.version_info[0]== 3: + import http.client as httpclient + from urllib.parse import urlparse + + """Rename Python3 str to ustr""" + ustr = str + + bytebuffer = memoryview + + read_input = input + +elif sys.version_info[0] == 2: + import httplib as httpclient + from urlparse import urlparse + + """Rename Python2 unicode to ustr""" + ustr = unicode + + bytebuffer = buffer + + read_input = raw_input + +else: + raise ImportError("Unknown python version:{0}".format(sys.version_info)) + diff --git a/azurelinuxagent/common/logger.py b/azurelinuxagent/common/logger.py new file mode 100644 index 0000000..c1eb18f --- /dev/null +++ b/azurelinuxagent/common/logger.py @@ -0,0 +1,156 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
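Every AgentError subclass above carries a fixed six-digit error code and can wrap an inner exception into its message. A small sketch, assuming the azurelinuxagent.common.exception module shown above is importable:

from azurelinuxagent.common.exception import OSUtilError

try:
    raise OSUtilError("useradd failed", inner=OSError("exit code 1"))
except OSUtilError as e:
    # The message is prefixed with the subclass code and carries the inner error.
    assert str(e).startswith("(000007)useradd failed")
    assert "inner error: exit code 1" in str(e)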
+# +# Requires Python 2.4+ and openssl_bin 1.0+ +# +""" +Log utils +""" +import os +import sys +from azurelinuxagent.common.future import ustr +from datetime import datetime + +class Logger(object): + """ + Logger class + """ + def __init__(self, logger=None, prefix=None): + self.appenders = [] + if logger is not None: + self.appenders.extend(logger.appenders) + self.prefix = prefix + + def verbose(self, msg_format, *args): + self.log(LogLevel.VERBOSE, msg_format, *args) + + def info(self, msg_format, *args): + self.log(LogLevel.INFO, msg_format, *args) + + def warn(self, msg_format, *args): + self.log(LogLevel.WARNING, msg_format, *args) + + def error(self, msg_format, *args): + self.log(LogLevel.ERROR, msg_format, *args) + + def log(self, level, msg_format, *args): + #if msg_format is not unicode convert it to unicode + if type(msg_format) is not ustr: + msg_format = ustr(msg_format, errors="backslashreplace") + if len(args) > 0: + msg = msg_format.format(*args) + else: + msg = msg_format + time = datetime.now().strftime(u'%Y/%m/%d %H:%M:%S.%f') + level_str = LogLevel.STRINGS[level] + if self.prefix is not None: + log_item = u"{0} {1} {2} {3}\n".format(time, level_str, self.prefix, + msg) + else: + log_item = u"{0} {1} {2}\n".format(time, level_str, msg) + + log_item = ustr(log_item.encode('ascii', "backslashreplace"), + encoding="ascii") + for appender in self.appenders: + appender.write(level, log_item) + + def add_appender(self, appender_type, level, path): + appender = _create_logger_appender(appender_type, level, path) + self.appenders.append(appender) + +class ConsoleAppender(object): + def __init__(self, level, path): + self.level = level + self.path = path + + def write(self, level, msg): + if self.level <= level: + try: + with open(self.path, "w") as console: + console.write(msg) + except IOError: + pass + +class FileAppender(object): + def __init__(self, level, path): + self.level = level + self.path = path + + def write(self, level, msg): + if self.level <= level: + try: + with open(self.path, "a+") as log_file: + log_file.write(msg) + except IOError: + pass + +class StdoutAppender(object): + def __init__(self, level): + self.level = level + + def write(self, level, msg): + if self.level <= level: + try: + sys.stdout.write(msg) + except IOError: + pass + +#Initialize logger instance +DEFAULT_LOGGER = Logger() + +class LogLevel(object): + VERBOSE = 0 + INFO = 1 + WARNING = 2 + ERROR = 3 + STRINGS = [ + "VERBOSE", + "INFO", + "WARNING", + "ERROR" + ] + +class AppenderType(object): + FILE = 0 + CONSOLE = 1 + STDOUT = 2 + +def add_logger_appender(appender_type, level=LogLevel.INFO, path=None): + DEFAULT_LOGGER.add_appender(appender_type, level, path) + +def verbose(msg_format, *args): + DEFAULT_LOGGER.verbose(msg_format, *args) + +def info(msg_format, *args): + DEFAULT_LOGGER.info(msg_format, *args) + +def warn(msg_format, *args): + DEFAULT_LOGGER.warn(msg_format, *args) + +def error(msg_format, *args): + DEFAULT_LOGGER.error(msg_format, *args) + +def log(level, msg_format, *args): + DEFAULT_LOGGER.log(level, msg_format, args) + +def _create_logger_appender(appender_type, level=LogLevel.INFO, path=None): + if appender_type == AppenderType.CONSOLE: + return ConsoleAppender(level, path) + elif appender_type == AppenderType.FILE: + return FileAppender(level, path) + elif appender_type == AppenderType.STDOUT: + return StdoutAppender(level) + else: + raise ValueError("Unknown appender type") + diff --git a/azurelinuxagent/common/osutil/__init__.py 
b/azurelinuxagent/common/osutil/__init__.py new file mode 100644 index 0000000..3b5ba3b --- /dev/null +++ b/azurelinuxagent/common/osutil/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.common.osutil.factory import get_osutil diff --git a/azurelinuxagent/common/osutil/coreos.py b/azurelinuxagent/common/osutil/coreos.py new file mode 100644 index 0000000..e26fd97 --- /dev/null +++ b/azurelinuxagent/common/osutil/coreos.py @@ -0,0 +1,92 @@ +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import re +import pwd +import shutil +import socket +import array +import struct +import fcntl +import time +import base64 +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.osutil.default import DefaultOSUtil + +class CoreOSUtil(DefaultOSUtil): + def __init__(self): + super(CoreOSUtil, self).__init__() + self.agent_conf_file_path = '/usr/share/oem/waagent.conf' + self.waagent_path='/usr/share/oem/bin/waagent' + self.python_path='/usr/share/oem/python/bin' + if 'PATH' in os.environ: + path = "{0}:{1}".format(os.environ['PATH'], self.python_path) + else: + path = self.python_path + os.environ['PATH'] = path + + if 'PYTHONPATH' in os.environ: + py_path = os.environ['PYTHONPATH'] + py_path = "{0}:{1}".format(py_path, self.waagent_path) + else: + py_path = self.waagent_path + os.environ['PYTHONPATH'] = py_path + + def is_sys_user(self, username): + #User 'core' is not a sysuser + if username == 'core': + return False + return super(CoreOSUtil, self).is_sys_user(username) + + def is_dhcp_enabled(self): + return True + + def start_network(self) : + return shellutil.run("systemctl start systemd-networkd", chk_err=False) + + def restart_if(self, iface): + shellutil.run("systemctl restart systemd-networkd") + + def restart_ssh_service(self): + # SSH is socket activated on CoreOS. No need to restart it. 
+ pass + + def stop_dhcp_service(self): + return shellutil.run("systemctl stop systemd-networkd", chk_err=False) + + def start_dhcp_service(self): + return shellutil.run("systemctl start systemd-networkd", chk_err=False) + + def start_agent_service(self): + return shellutil.run("systemctl start wagent", chk_err=False) + + def stop_agent_service(self): + return shellutil.run("systemctl stop wagent", chk_err=False) + + def get_dhcp_pid(self): + ret= shellutil.run_get_output("pidof systemd-networkd") + return ret[1] if ret[0] == 0 else None + + def conf_sshd(self, disable_password): + #In CoreOS, /etc/sshd_config is mount readonly. Skip the setting + pass + diff --git a/azurelinuxagent/common/osutil/debian.py b/azurelinuxagent/common/osutil/debian.py new file mode 100644 index 0000000..f455572 --- /dev/null +++ b/azurelinuxagent/common/osutil/debian.py @@ -0,0 +1,47 @@ +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import re +import pwd +import shutil +import socket +import array +import struct +import fcntl +import time +import base64 +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.osutil.default import DefaultOSUtil + +class DebianOSUtil(DefaultOSUtil): + def __init__(self): + super(DebianOSUtil, self).__init__() + + def restart_ssh_service(self): + return shellutil.run("service sshd restart", chk_err=False) + + def stop_agent_service(self): + return shellutil.run("service azurelinuxagent stop", chk_err=False) + + def start_agent_service(self): + return shellutil.run("service azurelinuxagent start", chk_err=False) + diff --git a/azurelinuxagent/common/osutil/default.py b/azurelinuxagent/common/osutil/default.py new file mode 100644 index 0000000..c243c85 --- /dev/null +++ b/azurelinuxagent/common/osutil/default.py @@ -0,0 +1,792 @@ +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
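CoreOSUtil and DebianOSUtil above subclass DefaultOSUtil (which follows) and override only the operations that actually differ per distro, with the factory's get_osutil() picking the right class at runtime. A miniature sketch of that pattern, using hypothetical names rather than the agent's own classes:

class BaseUtil(object):
    def restart_ssh_service(self):
        # Default behaviour shared by most distros.
        return "service sshd restart"

class SocketActivatedUtil(BaseUtil):
    def restart_ssh_service(self):
        # Distro where ssh is socket-activated: nothing to restart.
        return None

def get_util(distro_name):
    # Stand-in for the factory lookup, which maps the detected distro
    # to the matching *OSUtil subclass.
    return SocketActivatedUtil() if distro_name == "coreos" else BaseUtil()

assert get_util("coreos").restart_ssh_service() is None
assert get_util("debian").restart_ssh_service() == "service sshd restart"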
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import re +import shutil +import socket +import array +import struct +import time +import pwd +import fcntl +import base64 +import glob +import datetime +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.exception import OSUtilError +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.utils.cryptutil import CryptUtil + +__RULES_FILES__ = [ "/lib/udev/rules.d/75-persistent-net-generator.rules", + "/etc/udev/rules.d/70-persistent-net.rules" ] + +""" +Define distro specific behavior. OSUtil class defines default behavior +for all distros. Each concrete distro classes could overwrite default behavior +if needed. +""" + +class DefaultOSUtil(object): + + def __init__(self): + self.agent_conf_file_path = '/etc/waagent.conf' + self.selinux=None + + def get_agent_conf_file_path(self): + return self.agent_conf_file_path + + def get_userentry(self, username): + try: + return pwd.getpwnam(username) + except KeyError: + return None + + def is_sys_user(self, username): + """ + Check whether use is a system user. + If reset sys user is allowed in conf, return False + Otherwise, check whether UID is less than UID_MIN + """ + if conf.get_allow_reset_sys_user(): + return False + + userentry = self.get_userentry(username) + uidmin = None + try: + uidmin_def = fileutil.get_line_startingwith("UID_MIN", + "/etc/login.defs") + if uidmin_def is not None: + uidmin = int(uidmin_def.split()[1]) + except IOError as e: + pass + if uidmin == None: + uidmin = 100 + if userentry != None and userentry[2] < uidmin: + return True + else: + return False + + def useradd(self, username, expiration=None): + """ + Create user account with 'username' + """ + userentry = self.get_userentry(username) + if userentry is not None: + logger.info("User {0} already exists, skip useradd", username) + return + + if expiration is not None: + cmd = "useradd -m {0} -e {1}".format(username, expiration) + else: + cmd = "useradd -m {0}".format(username) + retcode, out = shellutil.run_get_output(cmd) + if retcode != 0: + raise OSUtilError(("Failed to create user account:{0}, " + "retcode:{1}, " + "output:{2}").format(username, retcode, out)) + + def chpasswd(self, username, password, crypt_id=6, salt_len=10): + if self.is_sys_user(username): + raise OSUtilError(("User {0} is a system user. 
" + "Will not set passwd.").format(username)) + passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) + cmd = "usermod -p '{0}' {1}".format(passwd_hash, username) + ret, output = shellutil.run_get_output(cmd, log_cmd=False) + if ret != 0: + raise OSUtilError(("Failed to set password for {0}: {1}" + "").format(username, output)) + + def conf_sudoer(self, username, nopasswd=False, remove=False): + sudoers_dir = conf.get_sudoers_dir() + sudoers_wagent = os.path.join(sudoers_dir, 'waagent') + + if not remove: + # for older distros create sudoers.d + if not os.path.isdir(sudoers_dir): + sudoers_file = os.path.join(sudoers_dir, '../sudoers') + # create the sudoers.d directory + os.mkdir(sudoers_dir) + # add the include of sudoers.d to the /etc/sudoers + sudoers = '\n#includedir ' + sudoers_dir + '\n' + fileutil.append_file(sudoers_file, sudoers) + sudoer = None + if nopasswd: + sudoer = "{0} ALL=(ALL) NOPASSWD: ALL\n".format(username) + else: + sudoer = "{0} ALL=(ALL) ALL\n".format(username) + fileutil.append_file(sudoers_wagent, sudoer) + fileutil.chmod(sudoers_wagent, 0o440) + else: + #Remove user from sudoers + if os.path.isfile(sudoers_wagent): + try: + content = fileutil.read_file(sudoers_wagent) + sudoers = content.split("\n") + sudoers = [x for x in sudoers if username not in x] + fileutil.write_file(sudoers_wagent, "\n".join(sudoers)) + except IOError as e: + raise OSUtilError("Failed to remove sudoer: {0}".format(e)) + + def del_root_password(self): + try: + passwd_file_path = conf.get_passwd_file_path() + passwd_content = fileutil.read_file(passwd_file_path) + passwd = passwd_content.split('\n') + new_passwd = [x for x in passwd if not x.startswith("root:")] + new_passwd.insert(0, "root:*LOCK*:14600::::::") + fileutil.write_file(passwd_file_path, "\n".join(new_passwd)) + except IOError as e: + raise OSUtilError("Failed to delete root password:{0}".format(e)) + + def _norm_path(self, filepath): + home = conf.get_home_dir() + # Expand HOME variable if present in path + path = os.path.normpath(filepath.replace("$HOME", home)) + return path + + def deploy_ssh_keypair(self, username, keypair): + """ + Deploy id_rsa and id_rsa.pub + """ + path, thumbprint = keypair + path = self._norm_path(path) + dir_path = os.path.dirname(path) + fileutil.mkdir(dir_path, mode=0o700, owner=username) + lib_dir = conf.get_lib_dir() + prv_path = os.path.join(lib_dir, thumbprint + '.prv') + if not os.path.isfile(prv_path): + raise OSUtilError("Can't find {0}.prv".format(thumbprint)) + shutil.copyfile(prv_path, path) + pub_path = path + '.pub' + crytputil = CryptUtil(conf.get_openssl_cmd()) + pub = crytputil.get_pubkey_from_prv(prv_path) + fileutil.write_file(pub_path, pub) + self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0') + self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0') + os.chmod(path, 0o644) + os.chmod(pub_path, 0o600) + + def openssl_to_openssh(self, input_file, output_file): + cryptutil = CryptUtil(conf.get_openssl_cmd()) + cryptutil.crt_to_ssh(input_file, output_file) + + def deploy_ssh_pubkey(self, username, pubkey): + """ + Deploy authorized_key + """ + path, thumbprint, value = pubkey + if path is None: + raise OSUtilError("Public key path is None") + + crytputil = CryptUtil(conf.get_openssl_cmd()) + + path = self._norm_path(path) + dir_path = os.path.dirname(path) + fileutil.mkdir(dir_path, mode=0o700, owner=username) + if value is not None: + if not value.startswith("ssh-"): + raise OSUtilError("Bad public key: {0}".format(value)) + 
fileutil.write_file(path, value) + elif thumbprint is not None: + lib_dir = conf.get_lib_dir() + crt_path = os.path.join(lib_dir, thumbprint + '.crt') + if not os.path.isfile(crt_path): + raise OSUtilError("Can't find {0}.crt".format(thumbprint)) + pub_path = os.path.join(lib_dir, thumbprint + '.pub') + pub = crytputil.get_pubkey_from_crt(crt_path) + fileutil.write_file(pub_path, pub) + self.set_selinux_context(pub_path, + 'unconfined_u:object_r:ssh_home_t:s0') + self.openssl_to_openssh(pub_path, path) + fileutil.chmod(pub_path, 0o600) + else: + raise OSUtilError("SSH public key Fingerprint and Value are None") + + self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0') + fileutil.chowner(path, username) + fileutil.chmod(path, 0o644) + + def is_selinux_system(self): + """ + Checks and sets self.selinux = True if SELinux is available on system. + """ + if self.selinux == None: + if shellutil.run("which getenforce", chk_err=False) == 0: + self.selinux = True + else: + self.selinux = False + return self.selinux + + def is_selinux_enforcing(self): + """ + Calls shell command 'getenforce' and returns True if 'Enforcing'. + """ + if self.is_selinux_system(): + output = shellutil.run_get_output("getenforce")[1] + return output.startswith("Enforcing") + else: + return False + + def set_selinux_enforce(self, state): + """ + Calls shell command 'setenforce' with 'state' + and returns resulting exit code. + """ + if self.is_selinux_system(): + if state: s = '1' + else: s='0' + return shellutil.run("setenforce "+s) + + def set_selinux_context(self, path, con): + """ + Calls shell 'chcon' with 'path' and 'con' context. + Returns exit result. + """ + if self.is_selinux_system(): + if not os.path.exists(path): + logger.error("Path does not exist: {0}".format(path)) + return 1 + return shellutil.run('chcon ' + con + ' ' + path) + + def conf_sshd(self, disable_password): + option = "no" if disable_password else "yes" + conf_file_path = conf.get_sshd_conf_file_path() + conf_file = fileutil.read_file(conf_file_path).split("\n") + textutil.set_ssh_config(conf_file, "PasswordAuthentication", option) + textutil.set_ssh_config(conf_file, "ChallengeResponseAuthentication", option) + textutil.set_ssh_config(conf_file, "ClientAliveInterval", "180") + fileutil.write_file(conf_file_path, "\n".join(conf_file)) + logger.info("{0} SSH password-based authentication methods." 
+ .format("Disabled" if disable_password else "Enabled")) + logger.info("Configured SSH client probing to keep connections alive.") + + + def get_dvd_device(self, dev_dir='/dev'): + pattern=r'(sr[0-9]|hd[c-z]|cdrom[0-9]|cd[0-9])' + for dvd in [re.match(pattern, dev) for dev in os.listdir(dev_dir)]: + if dvd is not None: + return "/dev/{0}".format(dvd.group(0)) + raise OSUtilError("Failed to get dvd device") + + def mount_dvd(self, max_retry=6, chk_err=True, dvd_device=None, mount_point=None): + if dvd_device is None: + dvd_device = self.get_dvd_device() + if mount_point is None: + mount_point = conf.get_dvd_mount_point() + mountlist = shellutil.run_get_output("mount")[1] + existing = self.get_mount_point(mountlist, dvd_device) + if existing is not None: #Already mounted + logger.info("{0} is already mounted at {1}", dvd_device, existing) + return + if not os.path.isdir(mount_point): + os.makedirs(mount_point) + + for retry in range(0, max_retry): + retcode = self.mount(dvd_device, mount_point, option="-o ro -t udf,iso9660", + chk_err=chk_err) + if retcode == 0: + logger.info("Successfully mounted dvd") + return + if retry < max_retry - 1: + logger.warn("Mount dvd failed: retry={0}, ret={1}", retry, + retcode) + time.sleep(5) + if chk_err: + raise OSUtilError("Failed to mount dvd.") + + def umount_dvd(self, chk_err=True, mount_point=None): + if mount_point is None: + mount_point = conf.get_dvd_mount_point() + retcode = self.umount(mount_point, chk_err=chk_err) + if chk_err and retcode != 0: + raise OSUtilError("Failed to umount dvd.") + + def eject_dvd(self, chk_err=True): + dvd = self.get_dvd_device() + retcode = shellutil.run("eject {0}".format(dvd)) + if chk_err and retcode != 0: + raise OSUtilError("Failed to eject dvd: ret={0}".format(retcode)) + + def try_load_atapiix_mod(self): + try: + self.load_atapiix_mod() + except Exception as e: + logger.warn("Could not load ATAPI driver: {0}".format(e)) + + def load_atapiix_mod(self): + if self.is_atapiix_mod_loaded(): + return + ret, kern_version = shellutil.run_get_output("uname -r") + if ret != 0: + raise Exception("Failed to call uname -r") + mod_path = os.path.join('/lib/modules', + kern_version.strip('\n'), + 'kernel/drivers/ata/ata_piix.ko') + if not os.path.isfile(mod_path): + raise Exception("Can't find module file:{0}".format(mod_path)) + + ret, output = shellutil.run_get_output("insmod " + mod_path) + if ret != 0: + raise Exception("Error calling insmod for ATAPI CD-ROM driver") + if not self.is_atapiix_mod_loaded(max_retry=3): + raise Exception("Failed to load ATAPI CD-ROM driver") + + def is_atapiix_mod_loaded(self, max_retry=1): + for retry in range(0, max_retry): + ret = shellutil.run("lsmod | grep ata_piix", chk_err=False) + if ret == 0: + logger.info("Module driver for ATAPI CD-ROM is already present.") + return True + if retry < max_retry - 1: + time.sleep(1) + return False + + def mount(self, dvd, mount_point, option="", chk_err=True): + cmd = "mount {0} {1} {2}".format(option, dvd, mount_point) + return shellutil.run_get_output(cmd, chk_err)[0] + + def umount(self, mount_point, chk_err=True): + return shellutil.run("umount {0}".format(mount_point), chk_err=chk_err) + + def allow_dhcp_broadcast(self): + #Open DHCP port if iptables is enabled. + # We supress error logging on error. 
+ shellutil.run("iptables -D INPUT -p udp --dport 68 -j ACCEPT", + chk_err=False) + shellutil.run("iptables -I INPUT -p udp --dport 68 -j ACCEPT", + chk_err=False) + + + def remove_rules_files(self, rules_files=__RULES_FILES__): + lib_dir = conf.get_lib_dir() + for src in rules_files: + file_name = fileutil.base_name(src) + dest = os.path.join(lib_dir, file_name) + if os.path.isfile(dest): + os.remove(dest) + if os.path.isfile(src): + logger.warn("Move rules file {0} to {1}", file_name, dest) + shutil.move(src, dest) + + def restore_rules_files(self, rules_files=__RULES_FILES__): + lib_dir = conf.get_lib_dir() + for dest in rules_files: + filename = fileutil.base_name(dest) + src = os.path.join(lib_dir, filename) + if os.path.isfile(dest): + continue + if os.path.isfile(src): + logger.warn("Move rules file {0} to {1}", filename, dest) + shutil.move(src, dest) + + def get_mac_addr(self): + """ + Convienience function, returns mac addr bound to + first non-loopback interface. + """ + ifname='' + while len(ifname) < 2 : + ifname=self.get_first_if()[0] + addr = self.get_if_mac(ifname) + return textutil.hexstr_to_bytearray(addr) + + def get_if_mac(self, ifname): + """ + Return the mac-address bound to the socket. + """ + sock = socket.socket(socket.AF_INET, + socket.SOCK_DGRAM, + socket.IPPROTO_UDP) + param = struct.pack('256s', (ifname[:15]+('\0'*241)).encode('latin-1')) + info = fcntl.ioctl(sock.fileno(), 0x8927, param) + return ''.join(['%02X' % textutil.str_to_ord(char) for char in info[18:24]]) + + def get_first_if(self): + """ + Return the interface name, and ip addr of the + first active non-loopback interface. + """ + iface='' + expected=16 # how many devices should I expect... + struct_size=40 # for 64bit the size is 40 bytes + sock = socket.socket(socket.AF_INET, + socket.SOCK_DGRAM, + socket.IPPROTO_UDP) + buff=array.array('B', b'\0' * (expected * struct_size)) + param = struct.pack('iL', + expected*struct_size, + buff.buffer_info()[0]) + ret = fcntl.ioctl(sock.fileno(), 0x8912, param) + retsize=(struct.unpack('iL', ret)[0]) + if retsize == (expected * struct_size): + logger.warn(('SIOCGIFCONF returned more than {0} up ' + 'network interfaces.'), expected) + sock = buff.tostring() + primary = bytearray(self.get_primary_interface(), encoding='utf-8') + for i in range(0, struct_size * expected, struct_size): + iface=sock[i:i+16].split(b'\0', 1)[0] + if len(iface) == 0 or self.is_loopback(iface) or iface != primary: + # test the next one + logger.info('interface [{0}] skipped'.format(iface)) + continue + else: + # use this one + logger.info('interface [{0}] selected'.format(iface)) + break + + return iface.decode('latin-1'), socket.inet_ntoa(sock[i+20:i+24]) + + def get_primary_interface(self): + """ + Get the name of the primary interface, which is the one with the + default route attached to it; if there are multiple default routes, + the primary has the lowest Metric. 
+ :return: the interface which has the default route + """ + # from linux/route.h + RTF_GATEWAY = 0x02 + DEFAULT_DEST = "00000000" + + hdr_iface = "Iface" + hdr_dest = "Destination" + hdr_flags = "Flags" + hdr_metric = "Metric" + + idx_iface = -1 + idx_dest = -1 + idx_flags = -1 + idx_metric = -1 + primary = None + primary_metric = None + + logger.info("examine /proc/net/route for primary interface") + with open('/proc/net/route') as routing_table: + idx = 0 + for header in filter(lambda h: len(h) > 0, routing_table.readline().strip(" \n").split("\t")): + if header == hdr_iface: + idx_iface = idx + elif header == hdr_dest: + idx_dest = idx + elif header == hdr_flags: + idx_flags = idx + elif header == hdr_metric: + idx_metric = idx + idx = idx + 1 + for entry in routing_table.readlines(): + route = entry.strip(" \n").split("\t") + if route[idx_dest] == DEFAULT_DEST and int(route[idx_flags]) & RTF_GATEWAY == RTF_GATEWAY: + metric = int(route[idx_metric]) + iface = route[idx_iface] + if primary is None or metric < primary_metric: + primary = iface + primary_metric = metric + + if primary is None: + primary = '' + + logger.info('primary interface is [{0}]'.format(primary)) + return primary + + + def is_primary_interface(self, ifname): + """ + Indicate whether the specified interface is the primary. + :param ifname: the name of the interface - eth0, lo, etc. + :return: True if this interface binds the default route + """ + return self.get_primary_interface() == ifname + + + def is_loopback(self, ifname): + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP) + result = fcntl.ioctl(s.fileno(), 0x8913, struct.pack('256s', ifname[:15])) + flags, = struct.unpack('H', result[16:18]) + isloopback = flags & 8 == 8 + logger.info('interface [{0}] has flags [{1}], is loopback [{2}]'.format(ifname, flags, isloopback)) + return isloopback + + def get_dhcp_lease_endpoint(self): + """ + OS specific, this should return the decoded endpoint of + the wireserver from option 245 in the dhcp leases file + if it exists on disk. + :return: The endpoint if available, or None + """ + return None + + @staticmethod + def get_endpoint_from_leases_path(pathglob): + """ + Try to discover and decode the wireserver endpoint in the + specified dhcp leases path. 
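For reference, this is the per-row test that get_primary_interface() applies to each /proc/net/route entry. In the method the column offsets are discovered from the header line; the sketch below hard-codes them and uses an invented row, so it is illustrative only and not part of the imported source.

    # Mirrors the default-route check in get_primary_interface(); row is made up.
    RTF_GATEWAY = 0x02
    DEFAULT_DEST = "00000000"

    row = "eth0\t00000000\t0100000A\t0003\t0\t0\t100\t00000000\t0\t0\t0"
    fields = row.strip(" \n").split("\t")
    iface, dest = fields[0], fields[1]
    flags, metric = int(fields[3]), int(fields[6])
    if dest == DEFAULT_DEST and (flags & RTF_GATEWAY) == RTF_GATEWAY:
        print("default route via {0}, metric {1}".format(iface, metric))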
+ :param pathglob: The path containing dhcp lease files + :return: The endpoint if available, otherwise None + """ + endpoint = None + + HEADER_LEASE = "lease" + HEADER_OPTION = "option unknown-245" + HEADER_DNS = "option domain-name-servers" + HEADER_EXPIRE = "expire" + FOOTER_LEASE = "}" + FORMAT_DATETIME = "%Y/%m/%d %H:%M:%S" + + logger.info("looking for leases in path [{0}]".format(pathglob)) + for lease_file in glob.glob(pathglob): + leases = open(lease_file).read() + if HEADER_OPTION in leases: + cached_endpoint = None + has_option_245 = False + expired = True # assume expired + for line in leases.splitlines(): + if line.startswith(HEADER_LEASE): + cached_endpoint = None + has_option_245 = False + expired = True + elif HEADER_DNS in line: + cached_endpoint = line.replace(HEADER_DNS, '').strip(" ;") + elif HEADER_OPTION in line: + has_option_245 = True + elif HEADER_EXPIRE in line: + if "never" in line: + expired = False + else: + try: + expire_string = line.split(" ", 4)[-1].strip(";") + expire_date = datetime.datetime.strptime(expire_string, FORMAT_DATETIME) + if expire_date > datetime.datetime.utcnow(): + expired = False + except: + logger.error("could not parse expiry token '{0}'".format(line)) + elif FOOTER_LEASE in line: + logger.info("dhcp entry:{0}, 245:{1}, expired:{2}".format( + cached_endpoint, has_option_245, expired)) + if not expired and cached_endpoint is not None and has_option_245: + endpoint = cached_endpoint + logger.info("found endpoint [{0}]".format(endpoint)) + # we want to return the last valid entry, so + # keep searching + if endpoint is not None: + logger.info("cached endpoint found [{0}]".format(endpoint)) + else: + logger.info("cached endpoint not found") + return endpoint + + def is_missing_default_route(self): + routes = shellutil.run_get_output("route -n")[1] + for route in routes.split("\n"): + if route.startswith("0.0.0.0 ") or route.startswith("default "): + return False + return True + + def get_if_name(self): + return self.get_first_if()[0] + + def get_ip4_addr(self): + return self.get_first_if()[1] + + def set_route_for_dhcp_broadcast(self, ifname): + return shellutil.run("route add 255.255.255.255 dev {0}".format(ifname), + chk_err=False) + + def remove_route_for_dhcp_broadcast(self, ifname): + shellutil.run("route del 255.255.255.255 dev {0}".format(ifname), + chk_err=False) + + def is_dhcp_enabled(self): + return False + + def stop_dhcp_service(self): + pass + + def start_dhcp_service(self): + pass + + def start_network(self): + pass + + def start_agent_service(self): + pass + + def stop_agent_service(self): + pass + + def register_agent_service(self): + pass + + def unregister_agent_service(self): + pass + + def restart_ssh_service(self): + pass + + def route_add(self, net, mask, gateway): + """ + Add specified route using /sbin/route add -net. 
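The lease scanner above only accepts a wireserver address when the same lease block also carries option unknown-245 and has not expired. A small self-contained check against a hand-written dhclient lease (the option payload and expiry date are invented; 168.63.129.16 plays the wireserver address), not part of the imported source:

    import os
    import tempfile
    from azurelinuxagent.common.osutil.default import DefaultOSUtil

    lease = (
        'lease {\n'
        '  interface "eth0";\n'
        '  option domain-name-servers 168.63.129.16;\n'
        '  option unknown-245 a8:3f:81:10;\n'
        '  expire 4 2049/01/01 00:00:00;\n'
        '}\n'
    )
    with tempfile.NamedTemporaryFile(mode="w", suffix=".leases", delete=False) as f:
        f.write(lease)
    print(DefaultOSUtil.get_endpoint_from_leases_path(f.name))  # 168.63.129.16
    os.remove(f.name)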
+ """ + cmd = ("/sbin/route add -net " + "{0} netmask {1} gw {2}").format(net, mask, gateway) + return shellutil.run(cmd, chk_err=False) + + def get_dhcp_pid(self): + ret= shellutil.run_get_output("pidof dhclient") + return ret[1] if ret[0] == 0 else None + + def set_hostname(self, hostname): + fileutil.write_file('/etc/hostname', hostname) + shellutil.run("hostname {0}".format(hostname), chk_err=False) + + def set_dhcp_hostname(self, hostname): + autosend = r'^[^#]*?send\s*host-name.*?(|gethostname[(,)])' + dhclient_files = ['/etc/dhcp/dhclient.conf', '/etc/dhcp3/dhclient.conf', '/etc/dhclient.conf'] + for conf_file in dhclient_files: + if not os.path.isfile(conf_file): + continue + if fileutil.findstr_in_file(conf_file, autosend): + #Return if auto send host-name is configured + return + fileutil.update_conf_file(conf_file, + 'send host-name', + 'send host-name "{0}";'.format(hostname)) + + def restart_if(self, ifname, retries=3, wait=5): + retry_limit=retries+1 + for attempt in range(1, retry_limit): + return_code=shellutil.run("ifdown {0} && ifup {0}".format(ifname)) + if return_code == 0: + return + logger.warn("failed to restart {0}: return code {1}".format(ifname, return_code)) + if attempt < retry_limit: + logger.info("retrying in {0} seconds".format(wait)) + time.sleep(wait) + else: + logger.warn("exceeded restart retries") + + def publish_hostname(self, hostname): + self.set_dhcp_hostname(hostname) + ifname = self.get_if_name() + self.restart_if(ifname) + + def set_scsi_disks_timeout(self, timeout): + for dev in os.listdir("/sys/block"): + if dev.startswith('sd'): + self.set_block_device_timeout(dev, timeout) + + def set_block_device_timeout(self, dev, timeout): + if dev is not None and timeout is not None: + file_path = "/sys/block/{0}/device/timeout".format(dev) + content = fileutil.read_file(file_path) + original = content.splitlines()[0].rstrip() + if original != timeout: + fileutil.write_file(file_path, timeout) + logger.info("Set block dev timeout: {0} with timeout: {1}", + dev, timeout) + + def get_mount_point(self, mountlist, device): + """ + Example of mountlist: + /dev/sda1 on / type ext4 (rw) + proc on /proc type proc (rw) + sysfs on /sys type sysfs (rw) + devpts on /dev/pts type devpts (rw,gid=5,mode=620) + tmpfs on /dev/shm type tmpfs + (rw,rootcontext="system_u:object_r:tmpfs_t:s0") + none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) + /dev/sdb1 on /mnt/resource type ext4 (rw) + """ + if (mountlist and device): + for entry in mountlist.split('\n'): + if(re.search(device, entry)): + tokens = entry.split() + #Return the 3rd column of this line + return tokens[2] if len(tokens) > 2 else None + return None + + def device_for_ide_port(self, port_id): + """ + Return device name attached to ide port 'n'. + """ + if port_id > 3: + return None + g0 = "00000000" + if port_id > 1: + g0 = "00000001" + port_id = port_id - 2 + device = None + path = "/sys/bus/vmbus/devices/" + for vmbus in os.listdir(path): + deviceid = fileutil.read_file(os.path.join(path, vmbus, "device_id")) + guid = deviceid.lstrip('{').split('-') + if guid[0] == g0 and guid[1] == "000" + ustr(port_id): + for root, dirs, files in os.walk(path + vmbus): + if root.endswith("/block"): + device = dirs[0] + break + else : #older distros + for d in dirs: + if ':' in d and "block" == d.split(':')[0]: + device = d.split(':')[1] + break + break + return device + + def del_account(self, username): + if self.is_sys_user(username): + logger.error("{0} is a system user. 
Will not delete it.", username) + shellutil.run("> /var/run/utmp") + shellutil.run("userdel -f -r " + username) + self.conf_sudoer(username, remove=True) + + def decode_customdata(self, data): + return base64.b64decode(data) + + def get_total_mem(self): + cmd = "grep MemTotal /proc/meminfo |awk '{print $2}'" + ret = shellutil.run_get_output(cmd) + if ret[0] == 0: + return int(ret[1])/1024 + else: + raise OSUtilError("Failed to get total memory: {0}".format(ret[1])) + + def get_processor_cores(self): + ret = shellutil.run_get_output("grep 'processor.*:' /proc/cpuinfo |wc -l") + if ret[0] == 0: + return int(ret[1]) + else: + raise OSUtilError("Failed to get processor cores") + + def set_admin_access_to_ip(self, dest_ip): + #This allows root to access dest_ip + rm_old= "iptables -D OUTPUT -d {0} -j ACCEPT -m owner --uid-owner 0" + rule = "iptables -A OUTPUT -d {0} -j ACCEPT -m owner --uid-owner 0" + shellutil.run(rm_old.format(dest_ip), chk_err=False) + shellutil.run(rule.format(dest_ip)) + + #This blocks all other users to access dest_ip + rm_old = "iptables -D OUTPUT -d {0} -j DROP" + rule = "iptables -A OUTPUT -d {0} -j DROP" + shellutil.run(rm_old.format(dest_ip), chk_err=False) + shellutil.run(rule.format(dest_ip)) + + def check_pid_alive(self, pid): + return pid is not None and os.path.isdir(os.path.join('/proc', pid)) diff --git a/azurelinuxagent/common/osutil/factory.py b/azurelinuxagent/common/osutil/factory.py new file mode 100644 index 0000000..5e8ae6e --- /dev/null +++ b/azurelinuxagent/common/osutil/factory.py @@ -0,0 +1,69 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
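A quick way to see the get_mount_point() helper above in action is to feed it a hand-written mount(8) listing; the devices and mount points below are made up, and the snippet is an illustration rather than part of the imported source.

    from azurelinuxagent.common.osutil.default import DefaultOSUtil

    mountlist = ("/dev/sda1 on / type ext4 (rw)\n"
                 "/dev/sdb1 on /mnt/resource type ext4 (rw)\n")
    print(DefaultOSUtil().get_mount_point(mountlist, "/dev/sdb1"))  # /mnt/resource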
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.utils.textutil import Version +from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ + DISTRO_FULL_NAME + +from .default import DefaultOSUtil +from .coreos import CoreOSUtil +from .debian import DebianOSUtil +from .freebsd import FreeBSDOSUtil +from .redhat import RedhatOSUtil, Redhat6xOSUtil +from .suse import SUSEOSUtil, SUSE11OSUtil +from .ubuntu import UbuntuOSUtil, Ubuntu12OSUtil, Ubuntu14OSUtil, \ + UbuntuSnappyOSUtil + +def get_osutil(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, + distro_full_name=DISTRO_FULL_NAME): + if distro_name == "ubuntu": + if Version(distro_version) == Version("12.04") or \ + Version(distro_version) == Version("12.10"): + return Ubuntu12OSUtil() + elif Version(distro_version) == Version("14.04") or \ + Version(distro_version) == Version("14.10"): + return Ubuntu14OSUtil() + elif distro_full_name == "Snappy Ubuntu Core": + return UbuntuSnappyOSUtil() + else: + return UbuntuOSUtil() + if distro_name == "coreos": + return CoreOSUtil() + if distro_name == "suse": + if distro_full_name=='SUSE Linux Enterprise Server' and \ + Version(distro_version) < Version('12') or \ + distro_full_name == 'openSUSE' and \ + Version(distro_version) < Version('13.2'): + return SUSE11OSUtil() + else: + return SUSEOSUtil() + elif distro_name == "debian": + return DebianOSUtil() + elif distro_name == "redhat" or distro_name == "centos" or \ + distro_name == "oracle": + if Version(distro_version) < Version("7"): + return Redhat6xOSUtil() + else: + return RedhatOSUtil() + elif distro_name == "freebsd": + return FreeBSDOSUtil() + else: + logger.warn("Unable to load distro implemetation for {0}.", distro_name) + logger.warn("Use default distro implemetation instead.") + return DefaultOSUtil() + diff --git a/azurelinuxagent/common/osutil/freebsd.py b/azurelinuxagent/common/osutil/freebsd.py new file mode 100644 index 0000000..ddf8db6 --- /dev/null +++ b/azurelinuxagent/common/osutil/freebsd.py @@ -0,0 +1,198 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
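The factory above normally keys off DISTRO_NAME, DISTRO_VERSION and DISTRO_FULL_NAME from azurelinuxagent.common.version; passing explicit values makes the selection visible. A minimal sketch, with example distro strings rather than values read from a real VM:

    from azurelinuxagent.common.osutil.factory import get_osutil

    osutil = get_osutil(distro_name="ubuntu",
                        distro_version="14.04",
                        distro_full_name="Ubuntu 14.04.5 LTS")
    print(type(osutil).__name__)   # Ubuntu14OSUtil
    print(osutil.get_dhcp_pid())   # dhclient PID as a string, or None if not running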
+# +# Requires Python 2.4+ and Openssl 1.0+ + +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import OSUtilError +from azurelinuxagent.common.osutil.default import DefaultOSUtil + + +class FreeBSDOSUtil(DefaultOSUtil): + def __init__(self): + super(FreeBSDOSUtil, self).__init__() + self._scsi_disks_timeout_set = False + + def set_hostname(self, hostname): + rc_file_path = '/etc/rc.conf' + conf_file = fileutil.read_file(rc_file_path).split("\n") + textutil.set_ini_config(conf_file, "hostname", hostname) + fileutil.write_file(rc_file_path, "\n".join(conf_file)) + shellutil.run("hostname {0}".format(hostname), chk_err=False) + + def restart_ssh_service(self): + return shellutil.run('service sshd restart', chk_err=False) + + def useradd(self, username, expiration=None): + """ + Create user account with 'username' + """ + userentry = self.get_userentry(username) + if userentry is not None: + logger.warn("User {0} already exists, skip useradd", username) + return + + if expiration is not None: + cmd = "pw useradd {0} -e {1} -m".format(username, expiration) + else: + cmd = "pw useradd {0} -m".format(username) + retcode, out = shellutil.run_get_output(cmd) + if retcode != 0: + raise OSUtilError(("Failed to create user account:{0}, " + "retcode:{1}, " + "output:{2}").format(username, retcode, out)) + + def del_account(self, username): + if self.is_sys_user(username): + logger.error("{0} is a system user. Will not delete it.", username) + shellutil.run('> /var/run/utx.active') + shellutil.run('rmuser -y ' + username) + self.conf_sudoer(username, remove=True) + + def chpasswd(self, username, password, crypt_id=6, salt_len=10): + if self.is_sys_user(username): + raise OSUtilError(("User {0} is a system user. " + "Will not set passwd.").format(username)) + passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) + cmd = "echo '{0}'|pw usermod {1} -H 0 ".format(passwd_hash, username) + ret, output = shellutil.run_get_output(cmd, log_cmd=False) + if ret != 0: + raise OSUtilError(("Failed to set password for {0}: {1}" + "").format(username, output)) + + def del_root_password(self): + err = shellutil.run('pw mod user root -w no') + if err: + raise OSUtilError("Failed to delete root password: Failed to update password database.") + + def get_if_mac(self, ifname): + data = self._get_net_info() + if data[0] == ifname: + return data[2].replace(':', '').upper() + return None + + def get_first_if(self): + return self._get_net_info()[:2] + + def route_add(self, net, mask, gateway): + cmd = 'route add {0} {1} {2}'.format(net, gateway, mask) + return shellutil.run(cmd, chk_err=False) + + def is_missing_default_route(self): + """ + For FreeBSD, the default broadcast goes to current default gw, not a all-ones broadcast address, need to + specify the route manually to get it work in a VNET environment. 
+ SEE ALSO: man ip(4) IP_ONESBCAST, + """ + return True + + def is_dhcp_enabled(self): + return True + + def start_dhcp_service(self): + shellutil.run("/etc/rc.d/dhclient start {0}".format(self.get_if_name()), chk_err=False) + + def allow_dhcp_broadcast(self): + pass + + def set_route_for_dhcp_broadcast(self, ifname): + return shellutil.run("route add 255.255.255.255 -iface {0}".format(ifname), chk_err=False) + + def remove_route_for_dhcp_broadcast(self, ifname): + shellutil.run("route delete 255.255.255.255 -iface {0}".format(ifname), chk_err=False) + + def get_dhcp_pid(self): + ret = shellutil.run_get_output("pgrep -n dhclient") + return ret[1] if ret[0] == 0 else None + + def eject_dvd(self, chk_err=True): + dvd = self.get_dvd_device() + retcode = shellutil.run("cdcontrol -f {0} eject".format(dvd)) + if chk_err and retcode != 0: + raise OSUtilError("Failed to eject dvd: ret={0}".format(retcode)) + + def restart_if(self, ifname): + # Restart dhclient only to publish hostname + shellutil.run("/etc/rc.d/dhclient restart {0}".format(ifname), chk_err=False) + + def get_total_mem(self): + cmd = "sysctl hw.physmem |awk '{print $2}'" + ret, output = shellutil.run_get_output(cmd) + if ret: + raise OSUtilError("Failed to get total memory: {0}".format(output)) + try: + return int(output)/1024/1024 + except ValueError: + raise OSUtilError("Failed to get total memory: {0}".format(output)) + + def get_processor_cores(self): + ret, output = shellutil.run_get_output("sysctl hw.ncpu |awk '{print $2}'") + if ret: + raise OSUtilError("Failed to get processor cores.") + + try: + return int(output) + except ValueError: + raise OSUtilError("Failed to get total memory: {0}".format(output)) + + def set_scsi_disks_timeout(self, timeout): + if self._scsi_disks_timeout_set: + return + + ret, output = shellutil.run_get_output('sysctl kern.cam.da.default_timeout={0}'.format(timeout)) + if ret: + raise OSUtilError("Failed set SCSI disks timeout: {0}".format(output)) + self._scsi_disks_timeout_set = True + + def check_pid_alive(self, pid): + return shellutil.run('ps -p {0}'.format(pid), chk_err=False) == 0 + + @staticmethod + def _get_net_info(): + """ + There is no SIOCGIFCONF + on freeBSD - just parse ifconfig. + Returns strings: iface, inet4_addr, and mac + or 'None,None,None' if unable to parse. + We will sleep and retry as the network must be up. + """ + iface = '' + inet = '' + mac = '' + + err, output = shellutil.run_get_output('ifconfig -l ether', chk_err=False) + if err: + raise OSUtilError("Can't find ether interface:{0}".format(output)) + ifaces = output.split() + if not ifaces: + raise OSUtilError("Can't find ether interface.") + iface = ifaces[0] + + err, output = shellutil.run_get_output('ifconfig ' + iface, chk_err=False) + if err: + raise OSUtilError("Can't get info for interface:{0}".format(iface)) + + for line in output.split('\n'): + if line.find('inet ') != -1: + inet = line.split()[1] + elif line.find('ether ') != -1: + mac = line.split()[1] + logger.verbose("Interface info: ({0},{1},{2})", iface, inet, mac) + + return iface, inet, mac diff --git a/azurelinuxagent/common/osutil/redhat.py b/azurelinuxagent/common/osutil/redhat.py new file mode 100644 index 0000000..03084b6 --- /dev/null +++ b/azurelinuxagent/common/osutil/redhat.py @@ -0,0 +1,122 @@ +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
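The FreeBSD helper _get_net_info() above falls back to parsing ifconfig because SIOCGIFCONF is not available there. The same scan can be tried on a canned output; the interface name, address and MAC below are invented, and the snippet is illustrative only.

    output = ("hn0: flags=8843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST> mtu 1500\n"
              "        ether 00:0d:3a:11:22:33\n"
              "        inet 10.0.0.4 netmask 0xffffff00 broadcast 10.0.0.255\n")
    inet = mac = ''
    for line in output.split('\n'):
        if line.find('inet ') != -1:
            inet = line.split()[1]
        elif line.find('ether ') != -1:
            mac = line.split()[1]
    print(inet, mac)   # 10.0.0.4 00:0d:3a:11:22:33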
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import re +import pwd +import shutil +import socket +import array +import struct +import fcntl +import time +import base64 +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.future import ustr, bytebuffer +from azurelinuxagent.common.exception import OSUtilError, CryptError +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.utils.cryptutil import CryptUtil +from azurelinuxagent.common.osutil.default import DefaultOSUtil + +class Redhat6xOSUtil(DefaultOSUtil): + def __init__(self): + super(Redhat6xOSUtil, self).__init__() + + def start_network(self): + return shellutil.run("/sbin/service networking start", chk_err=False) + + def restart_ssh_service(self): + return shellutil.run("/sbin/service sshd condrestart", chk_err=False) + + def stop_agent_service(self): + return shellutil.run("/sbin/service waagent stop", chk_err=False) + + def start_agent_service(self): + return shellutil.run("/sbin/service waagent start", chk_err=False) + + def register_agent_service(self): + return shellutil.run("chkconfig --add waagent", chk_err=False) + + def unregister_agent_service(self): + return shellutil.run("chkconfig --del waagent", chk_err=False) + + def openssl_to_openssh(self, input_file, output_file): + pubkey = fileutil.read_file(input_file) + try: + cryptutil = CryptUtil(conf.get_openssl_cmd()) + ssh_rsa_pubkey = cryptutil.asn1_to_ssh(pubkey) + except CryptError as e: + raise OSUtilError(ustr(e)) + fileutil.write_file(output_file, ssh_rsa_pubkey) + + #Override + def get_dhcp_pid(self): + ret= shellutil.run_get_output("pidof dhclient") + return ret[1] if ret[0] == 0 else None + + def set_hostname(self, hostname): + """ + Set /etc/sysconfig/network + """ + fileutil.update_conf_file('/etc/sysconfig/network', + 'HOSTNAME', + 'HOSTNAME={0}'.format(hostname)) + shellutil.run("hostname {0}".format(hostname), chk_err=False) + + def set_dhcp_hostname(self, hostname): + ifname = self.get_if_name() + filepath = "/etc/sysconfig/network-scripts/ifcfg-{0}".format(ifname) + fileutil.update_conf_file(filepath, 'DHCP_HOSTNAME', + 'DHCP_HOSTNAME={0}'.format(hostname)) + + def get_dhcp_lease_endpoint(self): + return self.get_endpoint_from_leases_path('/var/lib/dhclient/dhclient-*.leases') + +class RedhatOSUtil(Redhat6xOSUtil): + def __init__(self): + super(RedhatOSUtil, self).__init__() + + def set_hostname(self, hostname): + """ + Set /etc/hostname + Unlike redhat 6.x, redhat 7.x will set hostname to /etc/hostname + """ + DefaultOSUtil.set_hostname(self, hostname) + + def publish_hostname(self, hostname): + """ + Restart NetworkManager first before publishing hostname + """ + shellutil.run("service NetworkManager restart") + super(RedhatOSUtil, self).publish_hostname(hostname) + + def register_agent_service(self): + return shellutil.run("systemctl enable waagent", chk_err=False) + + def unregister_agent_service(self): + return 
shellutil.run("systemctl disable waagent", chk_err=False) + + def openssl_to_openssh(self, input_file, output_file): + DefaultOSUtil.openssl_to_openssh(self, input_file, output_file) + + def get_dhcp_lease_endpoint(self): + # centos7 has this weird naming with double hyphen like /var/lib/dhclient/dhclient--eth0.lease + return self.get_endpoint_from_leases_path('/var/lib/dhclient/dhclient-*.lease') diff --git a/azurelinuxagent/common/osutil/suse.py b/azurelinuxagent/common/osutil/suse.py new file mode 100644 index 0000000..f0ed0c0 --- /dev/null +++ b/azurelinuxagent/common/osutil/suse.py @@ -0,0 +1,108 @@ +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import re +import pwd +import shutil +import socket +import array +import struct +import fcntl +import time +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.textutil as textutil +from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, DISTRO_FULL_NAME +from azurelinuxagent.common.osutil.default import DefaultOSUtil + +class SUSE11OSUtil(DefaultOSUtil): + def __init__(self): + super(SUSE11OSUtil, self).__init__() + self.dhclient_name='dhcpcd' + + def set_hostname(self, hostname): + fileutil.write_file('/etc/HOSTNAME', hostname) + shellutil.run("hostname {0}".format(hostname), chk_err=False) + + def get_dhcp_pid(self): + ret= shellutil.run_get_output("pidof {0}".format(self.dhclient_name)) + return ret[1] if ret[0] == 0 else None + + def is_dhcp_enabled(self): + return True + + def stop_dhcp_service(self): + cmd = "/sbin/service {0} stop".format(self.dhclient_name) + return shellutil.run(cmd, chk_err=False) + + def start_dhcp_service(self): + cmd = "/sbin/service {0} start".format(self.dhclient_name) + return shellutil.run(cmd, chk_err=False) + + def start_network(self) : + return shellutil.run("/sbin/service start network", chk_err=False) + + def restart_ssh_service(self): + return shellutil.run("/sbin/service sshd restart", chk_err=False) + + def stop_agent_service(self): + return shellutil.run("/sbin/service waagent stop", chk_err=False) + + def start_agent_service(self): + return shellutil.run("/sbin/service waagent start", chk_err=False) + + def register_agent_service(self): + return shellutil.run("/sbin/insserv waagent", chk_err=False) + + def unregister_agent_service(self): + return shellutil.run("/sbin/insserv -r waagent", chk_err=False) + +class SUSEOSUtil(SUSE11OSUtil): + def __init__(self): + super(SUSEOSUtil, self).__init__() + self.dhclient_name = 'wickedd-dhcp4' + + def stop_dhcp_service(self): + cmd = "systemctl stop {0}".format(self.dhclient_name) + return shellutil.run(cmd, chk_err=False) + + def start_dhcp_service(self): + cmd = "systemctl start {0}".format(self.dhclient_name) + return shellutil.run(cmd, chk_err=False) + + def start_network(self) : + return 
shellutil.run("systemctl start network", chk_err=False) + + def restart_ssh_service(self): + return shellutil.run("systemctl restart sshd", chk_err=False) + + def stop_agent_service(self): + return shellutil.run("systemctl stop waagent", chk_err=False) + + def start_agent_service(self): + return shellutil.run("systemctl start waagent", chk_err=False) + + def register_agent_service(self): + return shellutil.run("systemctl enable waagent", chk_err=False) + + def unregister_agent_service(self): + return shellutil.run("systemctl disable waagent", chk_err=False) + + diff --git a/azurelinuxagent/common/osutil/ubuntu.py b/azurelinuxagent/common/osutil/ubuntu.py new file mode 100644 index 0000000..4032cf4 --- /dev/null +++ b/azurelinuxagent/common/osutil/ubuntu.py @@ -0,0 +1,66 @@ +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.osutil.default import DefaultOSUtil + +class Ubuntu14OSUtil(DefaultOSUtil): + def __init__(self): + super(Ubuntu14OSUtil, self).__init__() + + def start_network(self): + return shellutil.run("service networking start", chk_err=False) + + def stop_agent_service(self): + return shellutil.run("service walinuxagent stop", chk_err=False) + + def start_agent_service(self): + return shellutil.run("service walinuxagent start", chk_err=False) + + def remove_rules_files(self, rules_files=""): + pass + + def restore_rules_files(self, rules_files=""): + pass + + def get_dhcp_lease_endpoint(self): + return self.get_endpoint_from_leases_path('/var/lib/dhcp/dhclient.*.leases') + +class Ubuntu12OSUtil(Ubuntu14OSUtil): + def __init__(self): + super(Ubuntu12OSUtil, self).__init__() + + #Override + def get_dhcp_pid(self): + ret= shellutil.run_get_output("pidof dhclient3") + return ret[1] if ret[0] == 0 else None + +class UbuntuOSUtil(Ubuntu14OSUtil): + def __init__(self): + super(UbuntuOSUtil, self).__init__() + + def register_agent_service(self): + return shellutil.run("systemctl unmask walinuxagent", chk_err=False) + + def unregister_agent_service(self): + return shellutil.run("systemctl mask walinuxagent", chk_err=False) + +class UbuntuSnappyOSUtil(Ubuntu14OSUtil): + def __init__(self): + super(UbuntuSnappyOSUtil, self).__init__() + self.conf_file_path = '/apps/walinuxagent/current/waagent.conf' diff --git a/azurelinuxagent/common/protocol/__init__.py b/azurelinuxagent/common/protocol/__init__.py new file mode 100644 index 0000000..fb7c273 --- /dev/null +++ b/azurelinuxagent/common/protocol/__init__.py @@ -0,0 +1,21 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.common.protocol.util import get_protocol_util, \ + OVF_FILE_NAME, \ + TAG_FILE_NAME + diff --git a/azurelinuxagent/common/protocol/hostplugin.py b/azurelinuxagent/common/protocol/hostplugin.py new file mode 100644 index 0000000..6569604 --- /dev/null +++ b/azurelinuxagent/common/protocol/hostplugin.py @@ -0,0 +1,124 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.common.protocol.wire import * +from azurelinuxagent.common.utils import textutil + +HOST_PLUGIN_PORT = 32526 +URI_FORMAT_GET_API_VERSIONS = "http://{0}:{1}/versions" +URI_FORMAT_PUT_VM_STATUS = "http://{0}:{1}/status" +URI_FORMAT_PUT_LOG = "http://{0}:{1}/vmAgentLog" +API_VERSION = "2015-09-01" + + +class HostPluginProtocol(object): + def __init__(self, endpoint): + if endpoint is None: + raise ProtocolError("Host plugin endpoint not provided") + self.is_initialized = False + self.is_available = False + self.api_versions = None + self.endpoint = endpoint + + def ensure_initialized(self): + if not self.is_initialized: + self.api_versions = self.get_api_versions() + self.is_available = API_VERSION in self.api_versions + self.is_initialized = True + return self.is_available + + def get_api_versions(self): + url = URI_FORMAT_GET_API_VERSIONS.format(self.endpoint, + HOST_PLUGIN_PORT) + logger.info("getting API versions at [{0}]".format(url)) + try: + response = restutil.http_get(url) + if response.status != httpclient.OK: + logger.error( + "get API versions returned status code [{0}]".format( + response.status)) + return [] + return response.read() + except HttpError as e: + logger.error("get API versions failed with [{0}]".format(e)) + return [] + + def put_vm_status(self, status_blob, sas_url): + """ + Try to upload the VM status via the host plugin /status channel + :param sas_url: the blob SAS url to pass to the host plugin + :type status_blob: StatusBlob + """ + if not self.ensure_initialized(): + logger.error("host plugin channel is not available") + return + if status_blob is None or status_blob.vm_status is None: + logger.error("no status data was provided") + return + url = URI_FORMAT_PUT_VM_STATUS.format(self.endpoint, HOST_PLUGIN_PORT) + status = textutil.b64encode(status_blob.vm_status) + headers = {"x-ms-version": API_VERSION} + blob_headers = [{'headerName': 'x-ms-version', + 'headerValue': status_blob.__storage_version__}, + {'headerName': 'x-ms-blob-type', + 'headerValue': status_blob.type}] + data = 
json.dumps({'requestUri': sas_url, 'headers': blob_headers, + 'content': status}, sort_keys=True) + logger.info("put VM status at [{0}]".format(url)) + try: + response = restutil.http_put(url, data, headers) + if response.status != httpclient.OK: + logger.error("put VM status returned status code [{0}]".format( + response.status)) + except HttpError as e: + logger.error("put VM status failed with [{0}]".format(e)) + + def put_vm_log(self, content, container_id, deployment_id): + """ + Try to upload the given content to the host plugin + :param deployment_id: the deployment id, which is obtained from the + goal state (tenant name) + :param container_id: the container id, which is obtained from the + goal state + :param content: the binary content of the zip file to upload + :return: + """ + if not self.ensure_initialized(): + logger.error("host plugin channel is not available") + return + if content is None or container_id is None or deployment_id is None: + logger.error( + "invalid arguments passed: " + "[{0}], [{1}], [{2}]".format( + content, + container_id, + deployment_id)) + return + url = URI_FORMAT_PUT_LOG.format(self.endpoint, HOST_PLUGIN_PORT) + + headers = {"x-ms-vmagentlog-deploymentid": deployment_id, + "x-ms-vmagentlog-containerid": container_id} + logger.info("put VM log at [{0}]".format(url)) + try: + response = restutil.http_put(url, content, headers) + if response.status != httpclient.OK: + logger.error("put log returned status code [{0}]".format( + response.status)) + except HttpError as e: + logger.error("put log failed with [{0}]".format(e)) diff --git a/azurelinuxagent/common/protocol/metadata.py b/azurelinuxagent/common/protocol/metadata.py new file mode 100644 index 0000000..f86f72f --- /dev/null +++ b/azurelinuxagent/common/protocol/metadata.py @@ -0,0 +1,223 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
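The host-plugin channel above is only used once ensure_initialized() has confirmed the expected API version. A short sketch, assuming 168.63.129.16 as the wireserver endpoint that would normally come from DHCP option 245; it is not part of the imported source. When the plugin cannot be reached, get_api_versions() returns an empty list, so is_available stays False.

    from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol

    host_plugin = HostPluginProtocol("168.63.129.16")
    if host_plugin.ensure_initialized():
        # is_available is True only when API_VERSION is among the versions
        # advertised at http://<endpoint>:32526/versions
        print(host_plugin.api_versions)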
+# +# Requires Python 2.4+ and Openssl 1.0+ + +import json +import shutil +import os +import time +from azurelinuxagent.common.exception import ProtocolError, HttpError +from azurelinuxagent.common.future import httpclient, ustr +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.restutil as restutil +import azurelinuxagent.common.utils.textutil as textutil +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.utils.cryptutil import CryptUtil +from azurelinuxagent.common.protocol.restapi import * + +METADATA_ENDPOINT='169.254.169.254' +APIVERSION='2015-05-01-preview' +BASE_URI = "http://{0}/Microsoft.Compute/{1}?api-version={2}{3}" + +TRANSPORT_PRV_FILE_NAME = "V2TransportPrivate.pem" +TRANSPORT_CERT_FILE_NAME = "V2TransportCert.pem" + +#TODO remote workarround for azure stack +MAX_PING = 30 +RETRY_PING_INTERVAL = 10 + +def _add_content_type(headers): + if headers is None: + headers = {} + headers["content-type"] = "application/json" + return headers + +class MetadataProtocol(Protocol): + + def __init__(self, apiversion=APIVERSION, endpoint=METADATA_ENDPOINT): + self.apiversion = apiversion + self.endpoint = endpoint + self.identity_uri = BASE_URI.format(self.endpoint, "identity", + self.apiversion, "&$expand=*") + self.cert_uri = BASE_URI.format(self.endpoint, "certificates", + self.apiversion, "&$expand=*") + self.ext_uri = BASE_URI.format(self.endpoint, "extensionHandlers", + self.apiversion, "&$expand=*") + self.vmagent_uri = BASE_URI.format(self.endpoint, "vmAgentVersions", + self.apiversion, "&$expand=*") + self.provision_status_uri = BASE_URI.format(self.endpoint, + "provisioningStatus", + self.apiversion, "") + self.vm_status_uri = BASE_URI.format(self.endpoint, "status/vmagent", + self.apiversion, "") + self.ext_status_uri = BASE_URI.format(self.endpoint, + "status/extensions/{0}", + self.apiversion, "") + self.event_uri = BASE_URI.format(self.endpoint, "status/telemetry", + self.apiversion, "") + + def _get_data(self, url, headers=None): + try: + resp = restutil.http_get(url, headers=headers) + except HttpError as e: + raise ProtocolError(ustr(e)) + + if resp.status != httpclient.OK: + raise ProtocolError("{0} - GET: {1}".format(resp.status, url)) + + data = resp.read() + etag = resp.getheader('ETag') + if data is None: + return None + data = json.loads(ustr(data, encoding="utf-8")) + return data, etag + + def _put_data(self, url, data, headers=None): + headers = _add_content_type(headers) + try: + resp = restutil.http_put(url, json.dumps(data), headers=headers) + except HttpError as e: + raise ProtocolError(ustr(e)) + if resp.status != httpclient.OK: + raise ProtocolError("{0} - PUT: {1}".format(resp.status, url)) + + def _post_data(self, url, data, headers=None): + headers = _add_content_type(headers) + try: + resp = restutil.http_post(url, json.dumps(data), headers=headers) + except HttpError as e: + raise ProtocolError(ustr(e)) + if resp.status != httpclient.CREATED: + raise ProtocolError("{0} - POST: {1}".format(resp.status, url)) + + def _get_trans_cert(self): + trans_crt_file = os.path.join(conf.get_lib_dir(), + TRANSPORT_CERT_FILE_NAME) + if not os.path.isfile(trans_crt_file): + raise ProtocolError("{0} is missing.".format(trans_crt_file)) + content = fileutil.read_file(trans_crt_file) + return textutil.get_bytes_from_pem(content) + + def detect(self): + self.get_vminfo() + trans_prv_file = os.path.join(conf.get_lib_dir(), + TRANSPORT_PRV_FILE_NAME) + 
trans_cert_file = os.path.join(conf.get_lib_dir(), + TRANSPORT_CERT_FILE_NAME) + cryptutil = CryptUtil(conf.get_openssl_cmd()) + cryptutil.gen_transport_cert(trans_prv_file, trans_cert_file) + + #"Install" the cert and private key to /var/lib/waagent + thumbprint = cryptutil.get_thumbprint_from_crt(trans_cert_file) + prv_file = os.path.join(conf.get_lib_dir(), + "{0}.prv".format(thumbprint)) + crt_file = os.path.join(conf.get_lib_dir(), + "{0}.crt".format(thumbprint)) + shutil.copyfile(trans_prv_file, prv_file) + shutil.copyfile(trans_cert_file, crt_file) + + + def get_vminfo(self): + vminfo = VMInfo() + data, etag = self._get_data(self.identity_uri) + set_properties("vminfo", vminfo, data) + return vminfo + + def get_certs(self): + #TODO download and save certs + return CertList() + + def get_vmagent_manifests(self, last_etag=None): + manifests = VMAgentManifestList() + data, etag = self._get_data(self.vmagent_uri) + if last_etag == None or last_etag < etag: + set_properties("vmAgentManifests", manifests.vmAgentManifests, data) + return manifests, etag + + def get_vmagent_pkgs(self, vmagent_manifest): + #Agent package is the same with extension handler + vmagent_pkgs = ExtHandlerPackageList() + data = None + for manifest_uri in vmagent_manifest.versionsManifestUris: + try: + data = self._get_data(manifest_uri.uri) + break + except ProtocolError as e: + logger.warn("Failed to get vmagent versions: {0}", e) + logger.info("Retry getting vmagent versions") + if data is None: + raise ProtocolError(("Failed to get versions for vm agent: {0}" + "").format(vmagent_manifest.family)) + set_properties("vmAgentVersions", vmagent_pkgs, data) + # TODO: What etag should this return? + return vmagent_pkgs + + def get_ext_handlers(self, last_etag=None): + headers = { + "x-ms-vmagent-public-x509-cert": self._get_trans_cert() + } + ext_list = ExtHandlerList() + data, etag = self._get_data(self.ext_uri, headers=headers) + if last_etag == None or last_etag < etag: + set_properties("extensionHandlers", ext_list.extHandlers, data) + return ext_list, etag + + def get_ext_handler_pkgs(self, ext_handler): + ext_handler_pkgs = ExtHandlerPackageList() + data = None + for version_uri in ext_handler.versionUris: + try: + data, etag = self._get_data(version_uri.uri) + break + except ProtocolError as e: + logger.warn("Failed to get version uris: {0}", e) + logger.info("Retry getting version uris") + set_properties("extensionPackages", ext_handler_pkgs, data) + return ext_handler_pkgs + + def report_provision_status(self, provision_status): + validate_param('provisionStatus', provision_status, ProvisionStatus) + data = get_properties(provision_status) + self._put_data(self.provision_status_uri, data) + + def report_vm_status(self, vm_status): + validate_param('vmStatus', vm_status, VMStatus) + data = get_properties(vm_status) + #TODO code field is not implemented for metadata protocol yet. 
Remove it + handler_statuses = data['vmAgent']['extensionHandlers'] + for handler_status in handler_statuses: + try: + handler_status.pop('code', None) + except KeyError: + pass + + self._put_data(self.vm_status_uri, data) + + def report_ext_status(self, ext_handler_name, ext_name, ext_status): + validate_param('extensionStatus', ext_status, ExtensionStatus) + data = get_properties(ext_status) + uri = self.ext_status_uri.format(ext_name) + self._put_data(uri, data) + + def report_event(self, events): + #TODO disable telemetry for azure stack test + #validate_param('events', events, TelemetryEventList) + #data = get_properties(events) + #self._post_data(self.event_uri, data) + pass + diff --git a/azurelinuxagent/common/protocol/ovfenv.py b/azurelinuxagent/common/protocol/ovfenv.py new file mode 100644 index 0000000..4901871 --- /dev/null +++ b/azurelinuxagent/common/protocol/ovfenv.py @@ -0,0 +1,113 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# +""" +Copy and parse ovf-env.xml from provisioning ISO and local cache +""" +import os +import re +import shutil +import xml.dom.minidom as minidom +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import ProtocolError +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, findtext + +OVF_VERSION = "1.0" +OVF_NAME_SPACE = "http://schemas.dmtf.org/ovf/environment/1" +WA_NAME_SPACE = "http://schemas.microsoft.com/windowsazure" + +def _validate_ovf(val, msg): + if val is None: + raise ProtocolError("Failed to parse OVF XML: {0}".format(msg)) + +class OvfEnv(object): + """ + Read, and process provisioning info from provisioning file OvfEnv.xml + """ + def __init__(self, xml_text): + if xml_text is None: + raise ValueError("ovf-env is None") + logger.verbose("Load ovf-env.xml") + self.hostname = None + self.username = None + self.user_password = None + self.customdata = None + self.disable_ssh_password_auth = True + self.ssh_pubkeys = [] + self.ssh_keypairs = [] + self.parse(xml_text) + + def parse(self, xml_text): + """ + Parse xml tree, retreiving user and ssh key information. + Return self. + """ + wans = WA_NAME_SPACE + ovfns = OVF_NAME_SPACE + + xml_doc = parse_doc(xml_text) + + environment = find(xml_doc, "Environment", namespace=ovfns) + _validate_ovf(environment, "Environment not found") + + section = find(environment, "ProvisioningSection", namespace=wans) + _validate_ovf(section, "ProvisioningSection not found") + + version = findtext(environment, "Version", namespace=wans) + _validate_ovf(version, "Version not found") + + if version > OVF_VERSION: + logger.warn("Newer provisioning configuration detected. 
" + "Please consider updating waagent") + + conf_set = find(section, "LinuxProvisioningConfigurationSet", + namespace=wans) + _validate_ovf(conf_set, "LinuxProvisioningConfigurationSet not found") + + self.hostname = findtext(conf_set, "HostName", namespace=wans) + _validate_ovf(self.hostname, "HostName not found") + + self.username = findtext(conf_set, "UserName", namespace=wans) + _validate_ovf(self.username, "UserName not found") + + self.user_password = findtext(conf_set, "UserPassword", namespace=wans) + + self.customdata = findtext(conf_set, "CustomData", namespace=wans) + + auth_option = findtext(conf_set, "DisableSshPasswordAuthentication", + namespace=wans) + if auth_option is not None and auth_option.lower() == "true": + self.disable_ssh_password_auth = True + else: + self.disable_ssh_password_auth = False + + public_keys = findall(conf_set, "PublicKey", namespace=wans) + for public_key in public_keys: + path = findtext(public_key, "Path", namespace=wans) + fingerprint = findtext(public_key, "Fingerprint", namespace=wans) + value = findtext(public_key, "Value", namespace=wans) + self.ssh_pubkeys.append((path, fingerprint, value)) + + keypairs = findall(conf_set, "KeyPair", namespace=wans) + for keypair in keypairs: + path = findtext(keypair, "Path", namespace=wans) + fingerprint = findtext(keypair, "Fingerprint", namespace=wans) + self.ssh_keypairs.append((path, fingerprint)) + diff --git a/azurelinuxagent/common/protocol/restapi.py b/azurelinuxagent/common/protocol/restapi.py new file mode 100644 index 0000000..7f00488 --- /dev/null +++ b/azurelinuxagent/common/protocol/restapi.py @@ -0,0 +1,272 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# +import os +import copy +import re +import json +import xml.dom.minidom +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import ProtocolError, HttpError +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.utils.restutil as restutil + +def validate_param(name, val, expected_type): + if val is None: + raise ProtocolError("{0} is None".format(name)) + if not isinstance(val, expected_type): + raise ProtocolError(("{0} type should be {1} not {2}" + "").format(name, expected_type, type(val))) + +def set_properties(name, obj, data): + if isinstance(obj, DataContract): + validate_param("Property '{0}'".format(name), data, dict) + for prob_name, prob_val in data.items(): + prob_full_name = "{0}.{1}".format(name, prob_name) + try: + prob = getattr(obj, prob_name) + except AttributeError: + logger.warn("Unknown property: {0}", prob_full_name) + continue + prob = set_properties(prob_full_name, prob, prob_val) + setattr(obj, prob_name, prob) + return obj + elif isinstance(obj, DataContractList): + validate_param("List '{0}'".format(name), data, list) + for item_data in data: + item = obj.item_cls() + item = set_properties(name, item, item_data) + obj.append(item) + return obj + else: + return data + +def get_properties(obj): + if isinstance(obj, DataContract): + data = {} + props = vars(obj) + for prob_name, prob in list(props.items()): + data[prob_name] = get_properties(prob) + return data + elif isinstance(obj, DataContractList): + data = [] + for item in obj: + item_data = get_properties(item) + data.append(item_data) + return data + else: + return obj + +class DataContract(object): + pass + +class DataContractList(list): + def __init__(self, item_cls): + self.item_cls = item_cls + +""" +Data contract between guest and host +""" +class VMInfo(DataContract): + def __init__(self, subscriptionId=None, vmName=None, containerId=None, + roleName=None, roleInstanceName=None, tenantName=None): + self.subscriptionId = subscriptionId + self.vmName = vmName + self.containerId = containerId + self.roleName = roleName + self.roleInstanceName = roleInstanceName + self.tenantName = tenantName + +class Cert(DataContract): + def __init__(self, name=None, thumbprint=None, certificateDataUri=None): + self.name = name + self.thumbprint = thumbprint + self.certificateDataUri = certificateDataUri + +class CertList(DataContract): + def __init__(self): + self.certificates = DataContractList(Cert) + +#TODO: confirm vmagent manifest schema +class VMAgentManifestUri(DataContract): + def __init__(self, uri=None): + self.uri = uri + +class VMAgentManifest(DataContract): + def __init__(self, family=None): + self.family = family + self.versionsManifestUris = DataContractList(VMAgentManifestUri) + +class VMAgentManifestList(DataContract): + def __init__(self): + self.vmAgentManifests = DataContractList(VMAgentManifest) + +class Extension(DataContract): + def __init__(self, name=None, sequenceNumber=None, publicSettings=None, + protectedSettings=None, certificateThumbprint=None): + self.name = name + self.sequenceNumber = sequenceNumber + self.publicSettings = publicSettings + self.protectedSettings = protectedSettings + self.certificateThumbprint = certificateThumbprint + +class ExtHandlerProperties(DataContract): + def __init__(self): + self.version = None + self.upgradePolicy = None + self.state = None + self.extensions = DataContractList(Extension) + +class ExtHandlerVersionUri(DataContract): + def __init__(self): + 
self.uri = None + +class ExtHandler(DataContract): + def __init__(self, name=None): + self.name = name + self.properties = ExtHandlerProperties() + self.versionUris = DataContractList(ExtHandlerVersionUri) + +class ExtHandlerList(DataContract): + def __init__(self): + self.extHandlers = DataContractList(ExtHandler) + +class ExtHandlerPackageUri(DataContract): + def __init__(self, uri=None): + self.uri = uri + +class ExtHandlerPackage(DataContract): + def __init__(self, version = None): + self.version = version + self.uris = DataContractList(ExtHandlerPackageUri) + # TODO update the naming to align with metadata protocol + self.isinternal = False + +class ExtHandlerPackageList(DataContract): + def __init__(self): + self.versions = DataContractList(ExtHandlerPackage) + +class VMProperties(DataContract): + def __init__(self, certificateThumbprint=None): + #TODO need to confirm the property name + self.certificateThumbprint = certificateThumbprint + +class ProvisionStatus(DataContract): + def __init__(self, status=None, subStatus=None, description=None): + self.status = status + self.subStatus = subStatus + self.description = description + self.properties = VMProperties() + +class ExtensionSubStatus(DataContract): + def __init__(self, name=None, status=None, code=None, message=None): + self.name = name + self.status = status + self.code = code + self.message = message + +class ExtensionStatus(DataContract): + def __init__(self, configurationAppliedTime=None, operation=None, + status=None, seq_no=None, code=None, message=None): + self.configurationAppliedTime = configurationAppliedTime + self.operation = operation + self.status = status + self.sequenceNumber = seq_no + self.code = code + self.message = message + self.substatusList = DataContractList(ExtensionSubStatus) + +class ExtHandlerStatus(DataContract): + def __init__(self, name=None, version=None, status=None, code=0, + message=None): + self.name = name + self.version = version + self.status = status + self.code = code + self.message = message + self.extensions = DataContractList(ustr) + +class VMAgentStatus(DataContract): + def __init__(self, version=None, status=None, message=None): + self.version = version + self.status = status + self.message = message + self.extensionHandlers = DataContractList(ExtHandlerStatus) + +class VMStatus(DataContract): + def __init__(self): + self.vmAgent = VMAgentStatus() + +class TelemetryEventParam(DataContract): + def __init__(self, name=None, value=None): + self.name = name + self.value = value + +class TelemetryEvent(DataContract): + def __init__(self, eventId=None, providerId=None): + self.eventId = eventId + self.providerId = providerId + self.parameters = DataContractList(TelemetryEventParam) + +class TelemetryEventList(DataContract): + def __init__(self): + self.events = DataContractList(TelemetryEvent) + +class Protocol(DataContract): + + def detect(self): + raise NotImplementedError() + + def get_vminfo(self): + raise NotImplementedError() + + def get_certs(self): + raise NotImplementedError() + + def get_vmagent_manifests(self): + raise NotImplementedError() + + def get_vmagent_pkgs(self): + raise NotImplementedError() + + def get_ext_handlers(self): + raise NotImplementedError() + + def get_ext_handler_pkgs(self, extension): + raise NotImplementedError() + + def download_ext_handler_pkg(self, uri): + try: + resp = restutil.http_get(uri, chk_proxy=True) + if resp.status == restutil.httpclient.OK: + return resp.read() + except HttpError as e: + raise ProtocolError("Failed to download from: 
{0}".format(uri), e) + + def report_provision_status(self, provision_status): + raise NotImplementedError() + + def report_vm_status(self, vm_status): + raise NotImplementedError() + + def report_ext_status(self, ext_handler_name, ext_name, ext_status): + raise NotImplementedError() + + def report_event(self, event): + raise NotImplementedError() + diff --git a/azurelinuxagent/common/protocol/util.py b/azurelinuxagent/common/protocol/util.py new file mode 100644 index 0000000..7e7a74f --- /dev/null +++ b/azurelinuxagent/common/protocol/util.py @@ -0,0 +1,285 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# +import os +import re +import shutil +import time +import threading +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import ProtocolError, OSUtilError, \ + ProtocolNotFoundError, DhcpError +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.dhcp import get_dhcp_handler +from azurelinuxagent.common.protocol.ovfenv import OvfEnv +from azurelinuxagent.common.protocol.wire import WireProtocol +from azurelinuxagent.common.protocol.metadata import MetadataProtocol, \ + METADATA_ENDPOINT +import azurelinuxagent.common.utils.shellutil as shellutil + +OVF_FILE_NAME = "ovf-env.xml" + +#Tag file to indicate usage of metadata protocol +TAG_FILE_NAME = "useMetadataEndpoint.tag" + +PROTOCOL_FILE_NAME = "Protocol" + +#MAX retry times for protocol probing +MAX_RETRY = 360 + +PROBE_INTERVAL = 10 + +ENDPOINT_FILE_NAME = "WireServerEndpoint" + +def get_protocol_util(): + return ProtocolUtil() + +class ProtocolUtil(object): + """ + ProtocolUtil handles initialization for protocol instance. 2 protocol types + are invoked, wire protocol and metadata protocols. + """ + def __init__(self): + self.lock = threading.Lock() + self.protocol = None + self.osutil = get_osutil() + self.dhcp_handler = get_dhcp_handler() + + def copy_ovf_env(self): + """ + Copy ovf env file from dvd to hard disk. 
+ Remove password before save it to the disk + """ + dvd_mount_point = conf.get_dvd_mount_point() + ovf_file_path_on_dvd = os.path.join(dvd_mount_point, OVF_FILE_NAME) + tag_file_path_on_dvd = os.path.join(dvd_mount_point, TAG_FILE_NAME) + try: + self.osutil.mount_dvd() + ovfxml = fileutil.read_file(ovf_file_path_on_dvd, remove_bom=True) + ovfenv = OvfEnv(ovfxml) + ovfxml = re.sub(".*?<", "*<", ovfxml) + ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) + fileutil.write_file(ovf_file_path, ovfxml) + + if os.path.isfile(tag_file_path_on_dvd): + logger.info("Found {0} in provisioning ISO", TAG_FILE_NAME) + tag_file_path = os.path.join(conf.get_lib_dir(), TAG_FILE_NAME) + shutil.copyfile(tag_file_path_on_dvd, tag_file_path) + + except (OSUtilError, IOError) as e: + raise ProtocolError(ustr(e)) + + try: + self.osutil.umount_dvd() + self.osutil.eject_dvd() + except OSUtilError as e: + logger.warn(ustr(e)) + + return ovfenv + + def get_ovf_env(self): + """ + Load saved ovf-env.xml + """ + ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) + if os.path.isfile(ovf_file_path): + xml_text = fileutil.read_file(ovf_file_path) + return OvfEnv(xml_text) + else: + raise ProtocolError("ovf-env.xml is missing.") + + def _get_wireserver_endpoint(self): + try: + file_path = os.path.join(conf.get_lib_dir(), ENDPOINT_FILE_NAME) + return fileutil.read_file(file_path) + except IOError as e: + raise OSUtilError(ustr(e)) + + def _set_wireserver_endpoint(self, endpoint): + try: + file_path = os.path.join(conf.get_lib_dir(), ENDPOINT_FILE_NAME) + fileutil.write_file(file_path, endpoint) + except IOError as e: + raise OSUtilError(ustr(e)) + + def _detect_wire_protocol(self): + endpoint = self.dhcp_handler.endpoint + if endpoint is None: + logger.info("WireServer endpoint is not found. Rerun dhcp handler") + try: + self.dhcp_handler.run() + except DhcpError as e: + raise ProtocolError(ustr(e)) + endpoint = self.dhcp_handler.endpoint + + try: + protocol = WireProtocol(endpoint) + protocol.detect() + self._set_wireserver_endpoint(endpoint) + self.save_protocol("WireProtocol") + return protocol + except ProtocolError as e: + logger.info("WireServer is not responding. Reset endpoint") + self.dhcp_handler.endpoint = None + self.dhcp_handler.skip_cache = True + raise e + + def _detect_metadata_protocol(self): + protocol = MetadataProtocol() + protocol.detect() + + #Only allow root access METADATA_ENDPOINT + self.osutil.set_admin_access_to_ip(METADATA_ENDPOINT) + + self.save_protocol("MetadataProtocol") + + return protocol + + def _detect_protocol(self, protocols): + """ + Probe protocol endpoints in turn. + """ + self.clear_protocol() + + for retry in range(0, MAX_RETRY): + for protocol in protocols: + try: + if protocol == "WireProtocol": + return self._detect_wire_protocol() + + if protocol == "MetadataProtocol": + return self._detect_metadata_protocol() + + except ProtocolError as e: + logger.info("Protocol endpoint not found: {0}, {1}", + protocol, e) + + if retry < MAX_RETRY -1: + logger.info("Retry detect protocols: retry={0}", retry) + time.sleep(PROBE_INTERVAL) + raise ProtocolNotFoundError("No protocol found.") + + def _get_protocol(self): + """ + Get protocol instance based on previous detecting result. 
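        The name of the protocol detected last time is persisted in the
        "Protocol" file under the agent lib directory as either "WireProtocol"
        or "MetadataProtocol"; this reads that file back and rebuilds the
        matching protocol instance, raising ProtocolNotFoundError if no
        detection result has been recorded yet.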
+ """ + protocol_file_path = os.path.join(conf.get_lib_dir(), + PROTOCOL_FILE_NAME) + if not os.path.isfile(protocol_file_path): + raise ProtocolNotFoundError("No protocol found") + + protocol_name = fileutil.read_file(protocol_file_path) + if protocol_name == "WireProtocol": + endpoint = self._get_wireserver_endpoint() + return WireProtocol(endpoint) + elif protocol_name == "MetadataProtocol": + return MetadataProtocol() + else: + raise ProtocolNotFoundError(("Unknown protocol: {0}" + "").format(protocol_name)) + + def save_protocol(self, protocol_name): + """ + Save protocol endpoint + """ + protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) + try: + fileutil.write_file(protocol_file_path, protocol_name) + except IOError as e: + logger.error("Failed to save protocol endpoint: {0}", e) + + + def clear_protocol(self): + """ + Cleanup previous saved endpoint. + """ + logger.info("Clean protocol") + self.protocol = None + protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) + if not os.path.isfile(protocol_file_path): + return + + try: + os.remove(protocol_file_path) + except IOError as e: + logger.error("Failed to clear protocol endpoint: {0}", e) + + def get_protocol(self): + """ + Detect protocol by endpoints + + :returns: protocol instance + """ + self.lock.acquire() + + try: + if self.protocol is not None: + return self.protocol + + try: + self.protocol = self._get_protocol() + return self.protocol + except ProtocolNotFoundError: + pass + + logger.info("Detect protocol endpoints") + protocols = ["WireProtocol", "MetadataProtocol"] + self.protocol = self._detect_protocol(protocols) + + return self.protocol + + finally: + self.lock.release() + + + def get_protocol_by_file(self): + """ + Detect protocol by tag file. + + If a file "useMetadataEndpoint.tag" is found on provision iso, + metedata protocol will be used. No need to probe for wire protocol + + :returns: protocol instance + """ + self.lock.acquire() + + try: + if self.protocol is not None: + return self.protocol + + try: + self.protocol = self._get_protocol() + return self.protocol + except ProtocolNotFoundError: + pass + + logger.info("Detect protocol by file") + tag_file_path = os.path.join(conf.get_lib_dir(), TAG_FILE_NAME) + protocols = [] + if os.path.isfile(tag_file_path): + protocols.append("MetadataProtocol") + else: + protocols.append("WireProtocol") + self.protocol = self._detect_protocol(protocols) + return self.protocol + + finally: + self.lock.release() diff --git a/azurelinuxagent/common/protocol/wire.py b/azurelinuxagent/common/protocol/wire.py new file mode 100644 index 0000000..29a1663 --- /dev/null +++ b/azurelinuxagent/common/protocol/wire.py @@ -0,0 +1,1218 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ + +import time +import xml.sax.saxutils as saxutils +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.exception import ProtocolNotFoundError +from azurelinuxagent.common.future import httpclient, bytebuffer +from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, findtext, \ + getattrib, gettext, remove_bom, get_bytes_from_pem +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.utils.cryptutil import CryptUtil +from azurelinuxagent.common.protocol.restapi import * +from azurelinuxagent.common.protocol.hostplugin import HostPluginProtocol + +VERSION_INFO_URI = "http://{0}/?comp=versions" +GOAL_STATE_URI = "http://{0}/machine/?comp=goalstate" +HEALTH_REPORT_URI = "http://{0}/machine?comp=health" +ROLE_PROP_URI = "http://{0}/machine?comp=roleProperties" +TELEMETRY_URI = "http://{0}/machine?comp=telemetrydata" + +WIRE_SERVER_ADDR_FILE_NAME = "WireServer" +INCARNATION_FILE_NAME = "Incarnation" +GOAL_STATE_FILE_NAME = "GoalState.{0}.xml" +HOSTING_ENV_FILE_NAME = "HostingEnvironmentConfig.xml" +SHARED_CONF_FILE_NAME = "SharedConfig.xml" +CERTS_FILE_NAME = "Certificates.xml" +P7M_FILE_NAME = "Certificates.p7m" +PEM_FILE_NAME = "Certificates.pem" +EXT_CONF_FILE_NAME = "ExtensionsConfig.{0}.xml" +MANIFEST_FILE_NAME = "{0}.{1}.manifest.xml" +TRANSPORT_CERT_FILE_NAME = "TransportCert.pem" +TRANSPORT_PRV_FILE_NAME = "TransportPrivate.pem" + +PROTOCOL_VERSION = "2012-11-30" +ENDPOINT_FINE_NAME = "WireServer" + +SHORT_WAITING_INTERVAL = 1 # 1 second +LONG_WAITING_INTERVAL = 15 # 15 seconds + + +class UploadError(HttpError): + pass + + +class WireProtocolResourceGone(ProtocolError): + pass + + +class WireProtocol(Protocol): + """Slim layer to adapt wire protocol data to metadata protocol interface""" + + # TODO: Clean-up goal state processing + # At present, some methods magically update GoalState (e.g., get_vmagent_manifests), others (e.g., get_vmagent_pkgs) + # assume its presence. A better approach would make an explicit update call that returns the incarnation number and + # establishes that number the "context" for all other calls (either by updating the internal state of the protocol or + # by having callers pass the incarnation number to the method). 
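    # Typical call sequence, shown here for orientation only (the agent normally
    # obtains this object via ProtocolUtil.get_protocol() rather than constructing
    # it directly):
    #
    #     protocol = WireProtocol(endpoint)    # endpoint is the DHCP-discovered WireServer address
    #     protocol.detect()                    # version check, transport cert, initial goal state
    #     handlers, etag = protocol.get_ext_handlers()
    #     for handler in handlers.extHandlers:
    #         pkg_list = protocol.get_ext_handler_pkgs(handler)
    #     protocol.report_vm_status(vm_status) # serialised and uploaded as the status blob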
+ + def __init__(self, endpoint): + if endpoint is None: + raise ProtocolError("WireProtocol endpoint is None") + self.endpoint = endpoint + self.client = WireClient(self.endpoint) + + def detect(self): + self.client.check_wire_protocol_version() + + trans_prv_file = os.path.join(conf.get_lib_dir(), + TRANSPORT_PRV_FILE_NAME) + trans_cert_file = os.path.join(conf.get_lib_dir(), + TRANSPORT_CERT_FILE_NAME) + cryptutil = CryptUtil(conf.get_openssl_cmd()) + cryptutil.gen_transport_cert(trans_prv_file, trans_cert_file) + + self.client.update_goal_state(forced=True) + + def get_vminfo(self): + goal_state = self.client.get_goal_state() + hosting_env = self.client.get_hosting_env() + + vminfo = VMInfo() + vminfo.subscriptionId = None + vminfo.vmName = hosting_env.vm_name + vminfo.tenantName = hosting_env.deployment_name + vminfo.roleName = hosting_env.role_name + vminfo.roleInstanceName = goal_state.role_instance_id + vminfo.containerId = goal_state.container_id + return vminfo + + def get_certs(self): + certificates = self.client.get_certs() + return certificates.cert_list + + def get_vmagent_manifests(self): + # Update goal state to get latest extensions config + self.client.update_goal_state() + goal_state = self.client.get_goal_state() + ext_conf = self.client.get_ext_conf() + return ext_conf.vmagent_manifests, goal_state.incarnation + + def get_vmagent_pkgs(self, vmagent_manifest): + goal_state = self.client.get_goal_state() + man = self.client.get_gafamily_manifest(vmagent_manifest, goal_state) + return man.pkg_list + + def get_ext_handlers(self): + logger.verbose("Get extension handler config") + # Update goal state to get latest extensions config + self.client.update_goal_state() + goal_state = self.client.get_goal_state() + ext_conf = self.client.get_ext_conf() + # In wire protocol, incarnation is equivalent to ETag + return ext_conf.ext_handlers, goal_state.incarnation + + def get_ext_handler_pkgs(self, ext_handler): + logger.verbose("Get extension handler package") + goal_state = self.client.get_goal_state() + man = self.client.get_ext_manifest(ext_handler, goal_state) + return man.pkg_list + + def report_provision_status(self, provision_status): + validate_param("provision_status", provision_status, ProvisionStatus) + + if provision_status.status is not None: + self.client.report_health(provision_status.status, + provision_status.subStatus, + provision_status.description) + if provision_status.properties.certificateThumbprint is not None: + thumbprint = provision_status.properties.certificateThumbprint + self.client.report_role_prop(thumbprint) + + def report_vm_status(self, vm_status): + validate_param("vm_status", vm_status, VMStatus) + self.client.status_blob.set_vm_status(vm_status) + self.client.upload_status_blob() + + def report_ext_status(self, ext_handler_name, ext_name, ext_status): + validate_param("ext_status", ext_status, ExtensionStatus) + self.client.status_blob.set_ext_status(ext_handler_name, ext_status) + + def report_event(self, events): + validate_param("events", events, TelemetryEventList) + self.client.report_event(events) + + +def _build_role_properties(container_id, role_instance_id, thumbprint): + xml = (u"" + u"" + u"" + u"{0}" + u"" + u"" + u"{1}" + u"" + u"" + u"" + u"" + u"" + u"" + u"" + u"").format(container_id, role_instance_id, thumbprint) + return xml + + +def _build_health_report(incarnation, container_id, role_instance_id, + status, substatus, description): + # Escape '&', '<' and '>' + description = saxutils.escape(ustr(description)) + detail = u'' 
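    # The body posted to .../machine?comp=health is a small XML document of roughly
    # this shape (element names as used by the WireServer health report schema):
    #     <Health><GoalStateIncarnation>{0}</GoalStateIncarnation><Container>
    #       <ContainerId>{1}</ContainerId><RoleInstanceList><Role>
    #         <InstanceId>{2}</InstanceId><Health><State>{3}</State>{4}</Health>
    #       </Role></RoleInstanceList></Container></Health>
    # where {4} is the optional Details element built below.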
+ if substatus is not None: + substatus = saxutils.escape(ustr(substatus)) + detail = (u"<Details>
" + u"<SubStatus>{0}</SubStatus>" + u"<Description>{1}</Description>" + u"</Details>
").format(substatus, description) + xml = (u"" + u"" + u"{0}" + u"" + u"{1}" + u"" + u"" + u"{2}" + u"" + u"{3}" + u"{4}" + u"" + u"" + u"" + u"" + u"" + u"").format(incarnation, + container_id, + role_instance_id, + status, + detail) + return xml + + +""" +Convert VMStatus object to status blob format +""" + + +def ga_status_to_v1(ga_status): + formatted_msg = { + 'lang': 'en-US', + 'message': ga_status.message + } + v1_ga_status = { + 'version': ga_status.version, + 'status': ga_status.status, + 'formattedMessage': formatted_msg + } + return v1_ga_status + + +def ext_substatus_to_v1(sub_status_list): + status_list = [] + for substatus in sub_status_list: + status = { + "name": substatus.name, + "status": substatus.status, + "code": substatus.code, + "formattedMessage": { + "lang": "en-US", + "message": substatus.message + } + } + status_list.append(status) + return status_list + + +def ext_status_to_v1(ext_name, ext_status): + if ext_status is None: + return None + timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + v1_sub_status = ext_substatus_to_v1(ext_status.substatusList) + v1_ext_status = { + "status": { + "name": ext_name, + "configurationAppliedTime": ext_status.configurationAppliedTime, + "operation": ext_status.operation, + "status": ext_status.status, + "code": ext_status.code, + "formattedMessage": { + "lang": "en-US", + "message": ext_status.message + } + }, + "version": 1.0, + "timestampUTC": timestamp + } + if len(v1_sub_status) != 0: + v1_ext_status['substatus'] = v1_sub_status + return v1_ext_status + + +def ext_handler_status_to_v1(handler_status, ext_statuses, timestamp): + v1_handler_status = { + 'handlerVersion': handler_status.version, + 'handlerName': handler_status.name, + 'status': handler_status.status, + 'code': handler_status.code + } + if handler_status.message is not None: + v1_handler_status["formattedMessage"] = { + "lang": "en-US", + "message": handler_status.message + } + + if len(handler_status.extensions) > 0: + # Currently, no more than one extension per handler + ext_name = handler_status.extensions[0] + ext_status = ext_statuses.get(ext_name) + v1_ext_status = ext_status_to_v1(ext_name, ext_status) + if ext_status is not None and v1_ext_status is not None: + v1_handler_status["runtimeSettingsStatus"] = { + 'settingsStatus': v1_ext_status, + 'sequenceNumber': ext_status.sequenceNumber + } + return v1_handler_status + + +def vm_status_to_v1(vm_status, ext_statuses): + timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + + v1_ga_status = ga_status_to_v1(vm_status.vmAgent) + v1_handler_status_list = [] + for handler_status in vm_status.vmAgent.extensionHandlers: + v1_handler_status = ext_handler_status_to_v1(handler_status, + ext_statuses, timestamp) + if v1_handler_status is not None: + v1_handler_status_list.append(v1_handler_status) + + v1_agg_status = { + 'guestAgentStatus': v1_ga_status, + 'handlerAggregateStatus': v1_handler_status_list + } + v1_vm_status = { + 'version': '1.0', + 'timestampUTC': timestamp, + 'aggregateStatus': v1_agg_status + } + return v1_vm_status + + +class StatusBlob(object): + def __init__(self, client): + self.vm_status = None + self.ext_statuses = {} + self.client = client + self.type = None + self.data = None + + def set_vm_status(self, vm_status): + validate_param("vmAgent", vm_status, VMStatus) + self.vm_status = vm_status + + def set_ext_status(self, ext_handler_name, ext_status): + validate_param("extensionStatus", ext_status, ExtensionStatus) + self.ext_statuses[ext_handler_name] = ext_status 
+ + def to_json(self): + report = vm_status_to_v1(self.vm_status, self.ext_statuses) + return json.dumps(report) + + __storage_version__ = "2014-02-14" + + def upload(self, url): + # TODO upload extension only if content has changed + logger.verbose("Upload status blob") + upload_successful = False + self.type = self.get_blob_type(url) + self.data = self.to_json() + try: + if self.type == "BlockBlob": + self.put_block_blob(url, self.data) + elif self.type == "PageBlob": + self.put_page_blob(url, self.data) + else: + raise ProtocolError("Unknown blob type: {0}".format(self.type)) + except HttpError as e: + logger.warn("Initial upload failed [{0}]".format(e)) + else: + logger.verbose("Uploading status blob succeeded") + upload_successful = True + return upload_successful + + def get_blob_type(self, url): + # Check blob type + logger.verbose("Check blob type.") + timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + try: + resp = self.client.call_storage_service(restutil.http_head, url, { + "x-ms-date": timestamp, + 'x-ms-version': self.__class__.__storage_version__ + }) + except HttpError as e: + raise ProtocolError((u"Failed to get status blob type: {0}" + u"").format(e)) + if resp is None or resp.status != httpclient.OK: + raise ProtocolError(("Failed to get status blob type: {0}" + "").format(resp.status)) + + blob_type = resp.getheader("x-ms-blob-type") + logger.verbose("Blob type={0}".format(blob_type)) + return blob_type + + def put_block_blob(self, url, data): + logger.verbose("Upload block blob") + timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + resp = self.client.call_storage_service(restutil.http_put, url, data, + { + "x-ms-date": timestamp, + "x-ms-blob-type": "BlockBlob", + "Content-Length": ustr(len(data)), + "x-ms-version": self.__class__.__storage_version__ + }) + if resp.status != httpclient.CREATED: + raise UploadError( + "Failed to upload block blob: {0}".format(resp.status)) + + def put_page_blob(self, url, data): + logger.verbose("Replace old page blob") + + # Convert string into bytes + data = bytearray(data, encoding='utf-8') + timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) + + # Align to 512 bytes + page_blob_size = int((len(data) + 511) / 512) * 512 + resp = self.client.call_storage_service(restutil.http_put, url, "", + { + "x-ms-date": timestamp, + "x-ms-blob-type": "PageBlob", + "Content-Length": "0", + "x-ms-blob-content-length": ustr(page_blob_size), + "x-ms-version": self.__class__.__storage_version__ + }) + if resp.status != httpclient.CREATED: + raise UploadError( + "Failed to clean up page blob: {0}".format(resp.status)) + + if url.count("?") < 0: + url = "{0}?comp=page".format(url) + else: + url = "{0}&comp=page".format(url) + + logger.verbose("Upload page blob") + page_max = 4 * 1024 * 1024 # Max page size: 4MB + start = 0 + end = 0 + while end < len(data): + end = min(len(data), start + page_max) + content_size = end - start + # Align to 512 bytes + page_end = int((end + 511) / 512) * 512 + buf_size = page_end - start + buf = bytearray(buf_size) + buf[0: content_size] = data[start: end] + resp = self.client.call_storage_service( + restutil.http_put, url, bytebuffer(buf), + { + "x-ms-date": timestamp, + "x-ms-range": "bytes={0}-{1}".format(start, page_end - 1), + "x-ms-page-write": "update", + "x-ms-version": self.__class__.__storage_version__, + "Content-Length": ustr(page_end - start) + }) + if resp is None or resp.status != httpclient.CREATED: + raise UploadError( + "Failed to upload page blob: 
{0}".format(resp.status)) + start = end + + +def event_param_to_v1(param): + param_format = '' + param_type = type(param.value) + attr_type = "" + if param_type is int: + attr_type = 'mt:uint64' + elif param_type is str: + attr_type = 'mt:wstr' + elif ustr(param_type).count("'unicode'") > 0: + attr_type = 'mt:wstr' + elif param_type is bool: + attr_type = 'mt:bool' + elif param_type is float: + attr_type = 'mt:float64' + return param_format.format(param.name, saxutils.quoteattr(ustr(param.value)), + attr_type) + + +def event_to_v1(event): + params = "" + for param in event.parameters: + params += event_param_to_v1(param) + event_str = ('' + '' + '').format(event.eventId, params) + return event_str + + +class WireClient(object): + def __init__(self, endpoint): + logger.info("Wire server endpoint:{0}", endpoint) + self.endpoint = endpoint + self.goal_state = None + self.updated = None + self.hosting_env = None + self.shared_conf = None + self.certs = None + self.ext_conf = None + self.last_request = 0 + self.req_count = 0 + self.status_blob = StatusBlob(self) + self.host_plugin = HostPluginProtocol(self.endpoint) + + def prevent_throttling(self): + """ + Try to avoid throttling of wire server + """ + now = time.time() + if now - self.last_request < 1: + logger.verbose("Last request issued less than 1 second ago") + logger.verbose("Sleep {0} second to avoid throttling.", + SHORT_WAITING_INTERVAL) + time.sleep(SHORT_WAITING_INTERVAL) + self.last_request = now + + self.req_count += 1 + if self.req_count % 3 == 0: + logger.verbose("Sleep {0} second to avoid throttling.", + SHORT_WAITING_INTERVAL) + time.sleep(SHORT_WAITING_INTERVAL) + self.req_count = 0 + + def call_wireserver(self, http_req, *args, **kwargs): + """ + Call wire server. Handle throttling(403) and Resource Gone(410) + """ + self.prevent_throttling() + for retry in range(0, 3): + resp = http_req(*args, **kwargs) + if resp.status == httpclient.FORBIDDEN: + logger.warn("Sending too much request to wire server") + logger.info("Sleep {0} second to avoid throttling.", + LONG_WAITING_INTERVAL) + time.sleep(LONG_WAITING_INTERVAL) + elif resp.status == httpclient.GONE: + msg = args[0] if len(args) > 0 else "" + raise WireProtocolResourceGone(msg) + else: + return resp + raise ProtocolError(("Calling wire server failed: {0}" + "").format(resp.status)) + + def decode_config(self, data): + if data is None: + return None + data = remove_bom(data) + xml_text = ustr(data, encoding='utf-8') + return xml_text + + def fetch_config(self, uri, headers): + try: + resp = self.call_wireserver(restutil.http_get, uri, + headers=headers) + except HttpError as e: + raise ProtocolError(ustr(e)) + + if (resp.status != httpclient.OK): + raise ProtocolError("{0} - {1}".format(resp.status, uri)) + + return self.decode_config(resp.read()) + + def fetch_cache(self, local_file): + if not os.path.isfile(local_file): + raise ProtocolError("{0} is missing.".format(local_file)) + try: + return fileutil.read_file(local_file) + except IOError as e: + raise ProtocolError("Failed to read cache: {0}".format(e)) + + def save_cache(self, local_file, data): + try: + fileutil.write_file(local_file, data) + except IOError as e: + raise ProtocolError("Failed to write cache: {0}".format(e)) + + def call_storage_service(self, http_req, *args, **kwargs): + """ + Call storage service, handle SERVICE_UNAVAILABLE(503) + """ + for retry in range(0, 3): + resp = http_req(*args, **kwargs) + if resp.status == httpclient.SERVICE_UNAVAILABLE: + logger.warn("Storage service is not avaible 
temporaryly") + logger.info("Will retry later, in {0} seconds", + LONG_WAITING_INTERVAL) + time.sleep(LONG_WAITING_INTERVAL) + else: + return resp + raise ProtocolError(("Calling storage endpoint failed: {0}" + "").format(resp.status)) + + def fetch_manifest(self, version_uris): + for version_uri in version_uris: + logger.verbose("Fetch ext handler manifest: {0}", version_uri.uri) + try: + resp = self.call_storage_service(restutil.http_get, + version_uri.uri, None, + chk_proxy=True) + except HttpError as e: + raise ProtocolError(ustr(e)) + + if resp.status == httpclient.OK: + return self.decode_config(resp.read()) + logger.warn("Failed to fetch ExtensionManifest: {0}, {1}", + resp.status, version_uri.uri) + logger.info("Will retry later, in {0} seconds", + LONG_WAITING_INTERVAL) + time.sleep(LONG_WAITING_INTERVAL) + raise ProtocolError(("Failed to fetch ExtensionManifest from " + "all sources")) + + def update_hosting_env(self, goal_state): + if goal_state.hosting_env_uri is None: + raise ProtocolError("HostingEnvironmentConfig uri is empty") + local_file = os.path.join(conf.get_lib_dir(), HOSTING_ENV_FILE_NAME) + xml_text = self.fetch_config(goal_state.hosting_env_uri, + self.get_header()) + self.save_cache(local_file, xml_text) + self.hosting_env = HostingEnv(xml_text) + + def update_shared_conf(self, goal_state): + if goal_state.shared_conf_uri is None: + raise ProtocolError("SharedConfig uri is empty") + local_file = os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME) + xml_text = self.fetch_config(goal_state.shared_conf_uri, + self.get_header()) + self.save_cache(local_file, xml_text) + self.shared_conf = SharedConfig(xml_text) + + def update_certs(self, goal_state): + if goal_state.certs_uri is None: + return + local_file = os.path.join(conf.get_lib_dir(), CERTS_FILE_NAME) + xml_text = self.fetch_config(goal_state.certs_uri, + self.get_header_for_cert()) + self.save_cache(local_file, xml_text) + self.certs = Certificates(self, xml_text) + + def update_ext_conf(self, goal_state): + if goal_state.ext_uri is None: + logger.info("ExtensionsConfig.xml uri is empty") + self.ext_conf = ExtensionsConfig(None) + return + incarnation = goal_state.incarnation + local_file = os.path.join(conf.get_lib_dir(), + EXT_CONF_FILE_NAME.format(incarnation)) + xml_text = self.fetch_config(goal_state.ext_uri, self.get_header()) + self.save_cache(local_file, xml_text) + self.ext_conf = ExtensionsConfig(xml_text) + + def update_goal_state(self, forced=False, max_retry=3): + uri = GOAL_STATE_URI.format(self.endpoint) + xml_text = self.fetch_config(uri, self.get_header()) + goal_state = GoalState(xml_text) + + incarnation_file = os.path.join(conf.get_lib_dir(), + INCARNATION_FILE_NAME) + + if not forced: + last_incarnation = None + if (os.path.isfile(incarnation_file)): + last_incarnation = fileutil.read_file(incarnation_file) + new_incarnation = goal_state.incarnation + if last_incarnation is not None and \ + last_incarnation == new_incarnation: + # Goalstate is not updated. 
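                # The cached incarnation matches the one reported by the WireServer, so
                # the goal state and everything derived from it (HostingEnvironmentConfig,
                # SharedConfig, Certificates, ExtensionsConfig) is still current and is
                # not fetched again.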
+ return + + # Start updating goalstate, retry on 410 + for retry in range(0, max_retry): + try: + self.goal_state = goal_state + file_name = GOAL_STATE_FILE_NAME.format(goal_state.incarnation) + goal_state_file = os.path.join(conf.get_lib_dir(), file_name) + self.save_cache(goal_state_file, xml_text) + self.save_cache(incarnation_file, goal_state.incarnation) + self.update_hosting_env(goal_state) + self.update_shared_conf(goal_state) + self.update_certs(goal_state) + self.update_ext_conf(goal_state) + return + except WireProtocolResourceGone: + logger.info("Incarnation is out of date. Update goalstate.") + xml_text = self.fetch_config(uri, self.get_header()) + goal_state = GoalState(xml_text) + + raise ProtocolError("Exceeded max retry updating goal state") + + def get_goal_state(self): + if (self.goal_state is None): + incarnation_file = os.path.join(conf.get_lib_dir(), + INCARNATION_FILE_NAME) + incarnation = self.fetch_cache(incarnation_file) + + file_name = GOAL_STATE_FILE_NAME.format(incarnation) + goal_state_file = os.path.join(conf.get_lib_dir(), file_name) + xml_text = self.fetch_cache(goal_state_file) + self.goal_state = GoalState(xml_text) + return self.goal_state + + def get_hosting_env(self): + if (self.hosting_env is None): + local_file = os.path.join(conf.get_lib_dir(), HOSTING_ENV_FILE_NAME) + xml_text = self.fetch_cache(local_file) + self.hosting_env = HostingEnv(xml_text) + return self.hosting_env + + def get_shared_conf(self): + if (self.shared_conf is None): + local_file = os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME) + xml_text = self.fetch_cache(local_file) + self.shared_conf = SharedConfig(xml_text) + return self.shared_conf + + def get_certs(self): + if (self.certs is None): + local_file = os.path.join(conf.get_lib_dir(), CERTS_FILE_NAME) + xml_text = self.fetch_cache(local_file) + self.certs = Certificates(self, xml_text) + if self.certs is None: + return None + return self.certs + + def get_ext_conf(self): + if (self.ext_conf is None): + goal_state = self.get_goal_state() + if goal_state.ext_uri is None: + self.ext_conf = ExtensionsConfig(None) + else: + local_file = EXT_CONF_FILE_NAME.format(goal_state.incarnation) + local_file = os.path.join(conf.get_lib_dir(), local_file) + xml_text = self.fetch_cache(local_file) + self.ext_conf = ExtensionsConfig(xml_text) + return self.ext_conf + + def get_ext_manifest(self, ext_handler, goal_state): + local_file = MANIFEST_FILE_NAME.format(ext_handler.name, + goal_state.incarnation) + local_file = os.path.join(conf.get_lib_dir(), local_file) + xml_text = self.fetch_manifest(ext_handler.versionUris) + self.save_cache(local_file, xml_text) + return ExtensionManifest(xml_text) + + def get_gafamily_manifest(self, vmagent_manifest, goal_state): + local_file = MANIFEST_FILE_NAME.format(vmagent_manifest.family, + goal_state.incarnation) + local_file = os.path.join(conf.get_lib_dir(), local_file) + xml_text = self.fetch_manifest(vmagent_manifest.versionsManifestUris) + fileutil.write_file(local_file, xml_text) + return ExtensionManifest(xml_text) + + def check_wire_protocol_version(self): + uri = VERSION_INFO_URI.format(self.endpoint) + version_info_xml = self.fetch_config(uri, None) + version_info = VersionInfo(version_info_xml) + + preferred = version_info.get_preferred() + if PROTOCOL_VERSION == preferred: + logger.info("Wire protocol version:{0}", PROTOCOL_VERSION) + elif PROTOCOL_VERSION in version_info.get_supported(): + logger.info("Wire protocol version:{0}", PROTOCOL_VERSION) + logger.warn("Server prefered 
version:{0}", preferred) + else: + error = ("Agent supported wire protocol version: {0} was not " + "advised by Fabric.").format(PROTOCOL_VERSION) + raise ProtocolNotFoundError(error) + + def upload_status_blob(self): + ext_conf = self.get_ext_conf() + if ext_conf.status_upload_blob is not None: + if not self.status_blob.upload(ext_conf.status_upload_blob): + self.host_plugin.put_vm_status(self.status_blob, + ext_conf.status_upload_blob) + + def report_role_prop(self, thumbprint): + goal_state = self.get_goal_state() + role_prop = _build_role_properties(goal_state.container_id, + goal_state.role_instance_id, + thumbprint) + role_prop = role_prop.encode("utf-8") + role_prop_uri = ROLE_PROP_URI.format(self.endpoint) + headers = self.get_header_for_xml_content() + try: + resp = self.call_wireserver(restutil.http_post, role_prop_uri, + role_prop, headers=headers) + except HttpError as e: + raise ProtocolError((u"Failed to send role properties: {0}" + u"").format(e)) + if resp.status != httpclient.ACCEPTED: + raise ProtocolError((u"Failed to send role properties: {0}" + u", {1}").format(resp.status, resp.read())) + + def report_health(self, status, substatus, description): + goal_state = self.get_goal_state() + health_report = _build_health_report(goal_state.incarnation, + goal_state.container_id, + goal_state.role_instance_id, + status, + substatus, + description) + health_report = health_report.encode("utf-8") + health_report_uri = HEALTH_REPORT_URI.format(self.endpoint) + headers = self.get_header_for_xml_content() + try: + resp = self.call_wireserver(restutil.http_post, health_report_uri, + health_report, headers=headers, max_retry=8) + except HttpError as e: + raise ProtocolError((u"Failed to send provision status: {0}" + u"").format(e)) + if resp.status != httpclient.OK: + raise ProtocolError((u"Failed to send provision status: {0}" + u", {1}").format(resp.status, resp.read())) + + def send_event(self, provider_id, event_str): + uri = TELEMETRY_URI.format(self.endpoint) + data_format = ('' + '' + '{1}' + '' + '') + data = data_format.format(provider_id, event_str) + try: + header = self.get_header_for_xml_content() + resp = self.call_wireserver(restutil.http_post, uri, data, header) + except HttpError as e: + raise ProtocolError("Failed to send events:{0}".format(e)) + + if resp.status != httpclient.OK: + logger.verbose(resp.read()) + raise ProtocolError("Failed to send events:{0}".format(resp.status)) + + def report_event(self, event_list): + buf = {} + # Group events by providerId + for event in event_list.events: + if event.providerId not in buf: + buf[event.providerId] = "" + event_str = event_to_v1(event) + if len(event_str) >= 63 * 1024: + logger.warn("Single event too large: {0}", event_str[300:]) + continue + if len(buf[event.providerId] + event_str) >= 63 * 1024: + self.send_event(event.providerId, buf[event.providerId]) + buf[event.providerId] = "" + buf[event.providerId] = buf[event.providerId] + event_str + + # Send out all events left in buffer. 
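        # Events were grouped by providerId above and flushed whenever a batch would
        # have exceeded the ~63KB request limit; anything still buffered for a provider
        # is sent here as one final telemetry request per provider.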
+ for provider_id in list(buf.keys()): + if len(buf[provider_id]) > 0: + self.send_event(provider_id, buf[provider_id]) + + def get_header(self): + return { + "x-ms-agent-name": "WALinuxAgent", + "x-ms-version": PROTOCOL_VERSION + } + + def get_header_for_xml_content(self): + return { + "x-ms-agent-name": "WALinuxAgent", + "x-ms-version": PROTOCOL_VERSION, + "Content-Type": "text/xml;charset=utf-8" + } + + def get_header_for_cert(self): + trans_cert_file = os.path.join(conf.get_lib_dir(), + TRANSPORT_CERT_FILE_NAME) + content = self.fetch_cache(trans_cert_file) + cert = get_bytes_from_pem(content) + return { + "x-ms-agent-name": "WALinuxAgent", + "x-ms-version": PROTOCOL_VERSION, + "x-ms-cipher-name": "DES_EDE3_CBC", + "x-ms-guest-agent-public-x509-cert": cert + } + +class VersionInfo(object): + def __init__(self, xml_text): + """ + Query endpoint server for wire protocol version. + Fail if our desired protocol version is not seen. + """ + logger.verbose("Load Version.xml") + self.parse(xml_text) + + def parse(self, xml_text): + xml_doc = parse_doc(xml_text) + preferred = find(xml_doc, "Preferred") + self.preferred = findtext(preferred, "Version") + logger.info("Fabric preferred wire protocol version:{0}", self.preferred) + + self.supported = [] + supported = find(xml_doc, "Supported") + supported_version = findall(supported, "Version") + for node in supported_version: + version = gettext(node) + logger.verbose("Fabric supported wire protocol version:{0}", version) + self.supported.append(version) + + def get_preferred(self): + return self.preferred + + def get_supported(self): + return self.supported + + +class GoalState(object): + def __init__(self, xml_text): + if xml_text is None: + raise ValueError("GoalState.xml is None") + logger.verbose("Load GoalState.xml") + self.incarnation = None + self.expected_state = None + self.hosting_env_uri = None + self.shared_conf_uri = None + self.certs_uri = None + self.ext_uri = None + self.role_instance_id = None + self.container_id = None + self.load_balancer_probe_port = None + self.parse(xml_text) + + def parse(self, xml_text): + """ + Request configuration data from endpoint server. + """ + self.xml_text = xml_text + xml_doc = parse_doc(xml_text) + self.incarnation = findtext(xml_doc, "Incarnation") + self.expected_state = findtext(xml_doc, "ExpectedState") + self.hosting_env_uri = findtext(xml_doc, "HostingEnvironmentConfig") + self.shared_conf_uri = findtext(xml_doc, "SharedConfig") + self.certs_uri = findtext(xml_doc, "Certificates") + self.ext_uri = findtext(xml_doc, "ExtensionsConfig") + role_instance = find(xml_doc, "RoleInstance") + self.role_instance_id = findtext(role_instance, "InstanceId") + container = find(xml_doc, "Container") + self.container_id = findtext(container, "ContainerId") + lbprobe_ports = find(xml_doc, "LBProbePorts") + self.load_balancer_probe_port = findtext(lbprobe_ports, "Port") + return self + + +class HostingEnv(object): + """ + parse Hosting enviromnet config and store in + HostingEnvironmentConfig.xml + """ + + def __init__(self, xml_text): + if xml_text is None: + raise ValueError("HostingEnvironmentConfig.xml is None") + logger.verbose("Load HostingEnvironmentConfig.xml") + self.vm_name = None + self.role_name = None + self.deployment_name = None + self.parse(xml_text) + + def parse(self, xml_text): + """ + parse and create HostingEnvironmentConfig.xml. 
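        The VM name, role name and deployment name are read from the
        "instance"/"name" attributes of the Incarnation, Role and Deployment
        elements respectively.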
+ """ + self.xml_text = xml_text + xml_doc = parse_doc(xml_text) + incarnation = find(xml_doc, "Incarnation") + self.vm_name = getattrib(incarnation, "instance") + role = find(xml_doc, "Role") + self.role_name = getattrib(role, "name") + deployment = find(xml_doc, "Deployment") + self.deployment_name = getattrib(deployment, "name") + return self + + +class SharedConfig(object): + """ + parse role endpoint server and goal state config. + """ + + def __init__(self, xml_text): + logger.verbose("Load SharedConfig.xml") + self.parse(xml_text) + + def parse(self, xml_text): + """ + parse and write configuration to file SharedConfig.xml. + """ + # Not used currently + return self + +class Certificates(object): + """ + Object containing certificates of host and provisioned user. + """ + + def __init__(self, client, xml_text): + logger.verbose("Load Certificates.xml") + self.client = client + self.cert_list = CertList() + self.parse(xml_text) + + def parse(self, xml_text): + """ + Parse multiple certificates into seperate files. + """ + xml_doc = parse_doc(xml_text) + data = findtext(xml_doc, "Data") + if data is None: + return + + cryptutil = CryptUtil(conf.get_openssl_cmd()) + p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME) + p7m = ("MIME-Version:1.0\n" + "Content-Disposition: attachment; filename=\"{0}\"\n" + "Content-Type: application/x-pkcs7-mime; name=\"{1}\"\n" + "Content-Transfer-Encoding: base64\n" + "\n" + "{2}").format(p7m_file, p7m_file, data) + + self.client.save_cache(p7m_file, p7m) + + trans_prv_file = os.path.join(conf.get_lib_dir(), + TRANSPORT_PRV_FILE_NAME) + trans_cert_file = os.path.join(conf.get_lib_dir(), + TRANSPORT_CERT_FILE_NAME) + pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME) + # decrypt certificates + cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file, + pem_file) + + # The parsing process use public key to match prv and crt. 
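        # Walk the decrypted PEM bundle line by line, collecting each PRIVATE KEY or
        # CERTIFICATE block into a temporary file. The public key derived from each
        # block is then used to pair a private key with its certificate, so both can
        # be renamed to <thumbprint>.prv / <thumbprint>.crt under the lib directory.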
+ buf = [] + begin_crt = False + begin_prv = False + prvs = {} + thumbprints = {} + index = 0 + v1_cert_list = [] + with open(pem_file) as pem: + for line in pem.readlines(): + buf.append(line) + if re.match(r'[-]+BEGIN.*KEY[-]+', line): + begin_prv = True + elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line): + begin_crt = True + elif re.match(r'[-]+END.*KEY[-]+', line): + tmp_file = self.write_to_tmp_file(index, 'prv', buf) + pub = cryptutil.get_pubkey_from_prv(tmp_file) + prvs[pub] = tmp_file + buf = [] + index += 1 + begin_prv = False + elif re.match(r'[-]+END.*CERTIFICATE[-]+', line): + tmp_file = self.write_to_tmp_file(index, 'crt', buf) + pub = cryptutil.get_pubkey_from_crt(tmp_file) + thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file) + thumbprints[pub] = thumbprint + # Rename crt with thumbprint as the file name + crt = "{0}.crt".format(thumbprint) + v1_cert_list.append({ + "name": None, + "thumbprint": thumbprint + }) + os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt)) + buf = [] + index += 1 + begin_crt = False + + # Rename prv key with thumbprint as the file name + for pubkey in prvs: + thumbprint = thumbprints[pubkey] + if thumbprint: + tmp_file = prvs[pubkey] + prv = "{0}.prv".format(thumbprint) + os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv)) + + for v1_cert in v1_cert_list: + cert = Cert() + set_properties("certs", cert, v1_cert) + self.cert_list.certificates.append(cert) + + def write_to_tmp_file(self, index, suffix, buf): + file_name = os.path.join(conf.get_lib_dir(), + "{0}.{1}".format(index, suffix)) + self.client.save_cache(file_name, "".join(buf)) + return file_name + + +class ExtensionsConfig(object): + """ + parse ExtensionsConfig, downloading and unpacking them to /var/lib/waagent. + Install if true, remove if it is set to false. + """ + + def __init__(self, xml_text): + logger.verbose("Load ExtensionsConfig.xml") + self.ext_handlers = ExtHandlerList() + self.vmagent_manifests = VMAgentManifestList() + self.status_upload_blob = None + if xml_text is not None: + self.parse(xml_text) + + def parse(self, xml_text): + """ + Write configuration to file ExtensionsConfig.xml. 
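        The document is broken out into the GA family manifests
        (vmagent_manifests), the extension handlers together with their
        per-plugin runtime settings (ext_handlers), and the StatusUploadBlob
        URL used for status reporting.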
+ """ + xml_doc = parse_doc(xml_text) + + ga_families_list = find(xml_doc, "GAFamilies") + ga_families = findall(ga_families_list, "GAFamily") + + for ga_family in ga_families: + family = findtext(ga_family, "Name") + uris_list = find(ga_family, "Uris") + uris = findall(uris_list, "Uri") + manifest = VMAgentManifest() + manifest.family = family + for uri in uris: + manifestUri = VMAgentManifestUri(uri=gettext(uri)) + manifest.versionsManifestUris.append(manifestUri) + self.vmagent_manifests.vmAgentManifests.append(manifest) + + plugins_list = find(xml_doc, "Plugins") + plugins = findall(plugins_list, "Plugin") + plugin_settings_list = find(xml_doc, "PluginSettings") + plugin_settings = findall(plugin_settings_list, "Plugin") + + for plugin in plugins: + ext_handler = self.parse_plugin(plugin) + self.ext_handlers.extHandlers.append(ext_handler) + self.parse_plugin_settings(ext_handler, plugin_settings) + + self.status_upload_blob = findtext(xml_doc, "StatusUploadBlob") + + def parse_plugin(self, plugin): + ext_handler = ExtHandler() + ext_handler.name = getattrib(plugin, "name") + ext_handler.properties.version = getattrib(plugin, "version") + ext_handler.properties.state = getattrib(plugin, "state") + + auto_upgrade = getattrib(plugin, "autoUpgrade") + if auto_upgrade is not None and auto_upgrade.lower() == "true": + ext_handler.properties.upgradePolicy = "auto" + else: + ext_handler.properties.upgradePolicy = "manual" + + location = getattrib(plugin, "location") + failover_location = getattrib(plugin, "failoverlocation") + for uri in [location, failover_location]: + version_uri = ExtHandlerVersionUri() + version_uri.uri = uri + ext_handler.versionUris.append(version_uri) + return ext_handler + + def parse_plugin_settings(self, ext_handler, plugin_settings): + if plugin_settings is None: + return + + name = ext_handler.name + version = ext_handler.properties.version + settings = [x for x in plugin_settings \ + if getattrib(x, "name") == name and \ + getattrib(x, "version") == version] + + if settings is None or len(settings) == 0: + return + + runtime_settings = None + runtime_settings_node = find(settings[0], "RuntimeSettings") + seqNo = getattrib(runtime_settings_node, "seqNo") + runtime_settings_str = gettext(runtime_settings_node) + try: + runtime_settings = json.loads(runtime_settings_str) + except ValueError as e: + logger.error("Invalid extension settings") + return + + for plugin_settings_list in runtime_settings["runtimeSettings"]: + handler_settings = plugin_settings_list["handlerSettings"] + ext = Extension() + # There is no "extension name" in wire protocol. 
+ # Put + ext.name = ext_handler.name + ext.sequenceNumber = seqNo + ext.publicSettings = handler_settings.get("publicSettings") + ext.protectedSettings = handler_settings.get("protectedSettings") + thumbprint = handler_settings.get("protectedSettingsCertThumbprint") + ext.certificateThumbprint = thumbprint + ext_handler.properties.extensions.append(ext) + + +class ExtensionManifest(object): + def __init__(self, xml_text): + if xml_text is None: + raise ValueError("ExtensionManifest is None") + logger.verbose("Load ExtensionManifest.xml") + self.pkg_list = ExtHandlerPackageList() + self.parse(xml_text) + + def parse(self, xml_text): + xml_doc = parse_doc(xml_text) + self._handle_packages(findall(find(xml_doc, "Plugins"), "Plugin"), False) + self._handle_packages(findall(find(xml_doc, "InternalPlugins"), "Plugin"), True) + + def _handle_packages(self, packages, isinternal): + for package in packages: + version = findtext(package, "Version") + + disallow_major_upgrade = findtext(package, "DisallowMajorVersionUpgrade") + if disallow_major_upgrade is None: + disallow_major_upgrade = '' + disallow_major_upgrade = disallow_major_upgrade.lower() == "true" + + uris = find(package, "Uris") + uri_list = findall(uris, "Uri") + uri_list = [gettext(x) for x in uri_list] + pkg = ExtHandlerPackage() + pkg.version = version + pkg.disallow_major_upgrade = disallow_major_upgrade + for uri in uri_list: + pkg_uri = ExtHandlerVersionUri() + pkg_uri.uri = uri + pkg.uris.append(pkg_uri) + + pkg.isinternal = isinternal + self.pkg_list.versions.append(pkg) diff --git a/azurelinuxagent/common/rdma.py b/azurelinuxagent/common/rdma.py new file mode 100644 index 0000000..0c17e38 --- /dev/null +++ b/azurelinuxagent/common/rdma.py @@ -0,0 +1,280 @@ +# Windows Azure Linux Agent +# +# Copyright 2016 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +""" +Handle packages and modules to enable RDMA for IB networking +""" + +import os +import re +import time +import threading + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib + + +from azurelinuxagent.common.protocol.wire import SHARED_CONF_FILE_NAME + +dapl_config_paths = [ + '/etc/dat.conf', + '/etc/rdma/dat.conf', + '/usr/local/etc/dat.conf' +] + +def setup_rdma_device(): + logger.verbose("Parsing SharedConfig XML contents for RDMA details") + xml_doc = parse_doc( + fileutil.read_file(os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME))) + if xml_doc is None: + logger.error("Could not parse SharedConfig XML document") + return + instance_elem = find(xml_doc, "Instance") + if not instance_elem: + logger.error("Could not find in SharedConfig document") + return + + rdma_ipv4_addr = getattrib(instance_elem, "rdmaIPv4Address") + if not rdma_ipv4_addr: + logger.error( + "Could not find rdmaIPv4Address attribute on Instance element of SharedConfig.xml document") + return + + rdma_mac_addr = getattrib(instance_elem, "rdmaMacAddress") + if not rdma_mac_addr: + logger.error( + "Could not find rdmaMacAddress attribute on Instance element of SharedConfig.xml document") + return + + # add colons to the MAC address (e.g. 00155D33FF1D -> + # 00:15:5D:33:FF:1D) + rdma_mac_addr = ':'.join([rdma_mac_addr[i:i+2] + for i in range(0, len(rdma_mac_addr), 2)]) + logger.info("Found RDMA details. IPv4={0} MAC={1}".format( + rdma_ipv4_addr, rdma_mac_addr)) + + # Set up the RDMA device with collected informatino + RDMADeviceHandler(rdma_ipv4_addr, rdma_mac_addr).start() + logger.info("RDMA: device is set up") + return + +class RDMAHandler(object): + + driver_module_name = 'hv_network_direct' + + @staticmethod + def get_rdma_version(): + """Retrieve the firmware version information from the system. + This depends on information provided by the Linux kernel.""" + + driver_info_source = '/var/lib/hyperv/.kvp_pool_0' + base_kernel_err_msg = 'Kernel does not provide the necessary ' + base_kernel_err_msg += 'information or the hv_kvp_daemon is not ' + base_kernel_err_msg += 'running.' + if not os.path.isfile(driver_info_source): + error_msg = 'RDMA: Source file "%s" does not exist. ' + error_msg += base_kernel_err_msg + logger.error(error_msg % driver_info_source) + return + + lines = open(driver_info_source).read() + if not lines: + error_msg = 'RDMA: Source file "%s" is empty. ' + error_msg += base_kernel_err_msg + logger.error(error_msg % driver_info_source) + return + + r = re.search("NdDriverVersion\0+(\d\d\d\.\d)", lines) + if r: + NdDriverVersion = r.groups()[0] + return NdDriverVersion + else: + error_msg = 'RDMA: NdDriverVersion not found in "%s"' + logger.error(error_msg % driver_info_source) + return + + def load_driver_module(self): + """Load the kernel driver, this depends on the proper driver + to be installed with the install_driver() method""" + logger.info("RDMA: probing module '%s'" % self.driver_module_name) + result = shellutil.run('modprobe %s' % self.driver_module_name) + if result != 0: + error_msg = 'Could not load "%s" kernel module. 
' + error_msg += 'Run "modprobe %s" as root for more details' + logger.error( + error_msg % (self.driver_module_name, self.driver_module_name) + ) + return + logger.info('RDMA: Loaded the kernel driver successfully.') + return True + + def install_driver(self): + """Install the driver. This is distribution specific and must + be overwritten in the child implementation.""" + logger.error('RDMAHandler.install_driver not implemented') + + def is_driver_loaded(self): + """Check if the network module is loaded in kernel space""" + cmd = 'lsmod | grep ^%s' % self.driver_module_name + status, loaded_modules = shellutil.run_get_output(cmd) + logger.info('RDMA: Checking if the module loaded.') + if loaded_modules: + logger.info('RDMA: module loaded.') + return True + logger.info('RDMA: module not loaded.') + + def reboot_system(self): + """Reboot the system. This is required as the kernel module for + the rdma driver cannot be unloaded with rmmod""" + logger.info('RDMA: Rebooting system.') + ret = shellutil.run('shutdown -r now') + if ret != 0: + logger.error('RDMA: Failed to reboot the system') + + +dapl_config_paths = [ + '/etc/dat.conf', '/etc/rdma/dat.conf', '/usr/local/etc/dat.conf'] + +class RDMADeviceHandler(object): + + """ + Responsible for writing RDMA IP and MAC address to the /dev/hvnd_rdma + interface. + """ + + rdma_dev = '/dev/hvnd_rdma' + device_check_timeout_sec = 120 + device_check_interval_sec = 1 + + ipv4_addr = None + mac_adr = None + + def __init__(self, ipv4_addr, mac_addr): + self.ipv4_addr = ipv4_addr + self.mac_addr = mac_addr + + def start(self): + """ + Start a thread in the background to process the RDMA tasks and returns. + """ + logger.info("RDMA: starting device processing in the background.") + threading.Thread(target=self.process).start() + + def process(self): + RDMADeviceHandler.wait_rdma_device( + self.rdma_dev, self.device_check_timeout_sec, self.device_check_interval_sec) + RDMADeviceHandler.update_dat_conf(dapl_config_paths, self.ipv4_addr) + RDMADeviceHandler.write_rdma_config_to_device( + self.rdma_dev, self.ipv4_addr, self.mac_addr) + RDMADeviceHandler.update_network_interface(self.mac_addr, self.ipv4_addr) + + @staticmethod + def update_dat_conf(paths, ipv4_addr): + """ + Looks at paths for dat.conf file and updates the ip address for the + infiniband interface. 
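A minimal usage sketch of RDMADeviceHandler as defined above. The IPv4 and MAC values are placeholders; in the agent, setup_rdma_device() extracts the real ones from SharedConfig.xml before constructing the handler.

    from azurelinuxagent.common.rdma import RDMADeviceHandler

    # Placeholder addresses, for illustration only.
    handler = RDMADeviceHandler("172.16.1.10", "00:15:5D:33:FF:1D")
    # start() returns immediately; a background thread waits for /dev/hvnd_rdma,
    # updates dat.conf and then writes the MAC/IP pair to the device node.
    handler.start()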
+ """ + logger.info("Updating DAPL configuration file") + for f in paths: + logger.info("RDMA: trying {0}".format(f)) + if not os.path.isfile(f): + logger.info( + "RDMA: DAPL config not found at {0}".format(f)) + continue + logger.info("RDMA: DAPL config is at: {0}".format(f)) + cfg = fileutil.read_file(f) + new_cfg = RDMADeviceHandler.replace_dat_conf_contents( + cfg, ipv4_addr) + fileutil.write_file(f, new_cfg) + logger.info("RDMA: DAPL configuration is updated") + return + + raise Exception("RDMA: DAPL configuration file not found at predefined paths") + + @staticmethod + def replace_dat_conf_contents(cfg, ipv4_addr): + old = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"\S+ 0\"" + new = "ofa-v2-ib0 u2.0 nonthreadsafe default libdaplofa.so.2 dapl.2.0 \"{0} 0\"".format( + ipv4_addr) + return re.sub(old, new, cfg) + + @staticmethod + def write_rdma_config_to_device(path, ipv4_addr, mac_addr): + data = RDMADeviceHandler.generate_rdma_config(ipv4_addr, mac_addr) + logger.info( + "RDMA: Updating device with configuration: {0}".format(data)) + with open(path, "w") as f: + f.write(data) + logger.info("RDMA: Updated device with IPv4/MAC addr successfully") + + @staticmethod + def generate_rdma_config(ipv4_addr, mac_addr): + return 'rdmaMacAddress="{0}" rdmaIPv4Address="{1}"'.format(mac_addr, ipv4_addr) + + @staticmethod + def wait_rdma_device(path, timeout_sec, check_interval_sec): + logger.info("RDMA: waiting for device={0} timeout={1}s".format(path, timeout_sec)) + total_retries = timeout_sec/check_interval_sec + n = 0 + while n < total_retries: + if os.path.exists(path): + logger.info("RDMA: device ready") + return + logger.verbose( + "RDMA: device not ready, sleep {0}s".format(check_interval_sec)) + time.sleep(check_interval_sec) + n += 1 + logger.error("RDMA device wait timed out") + raise Exception("The device did not show up in {0} seconds ({1} retries)".format( + timeout_sec, total_retries)) + + @staticmethod + def update_network_interface(mac_addr, ipv4_addr): + netmask=16 + + logger.info("RDMA: will update the network interface with IPv4/MAC") + + if_name=RDMADeviceHandler.get_interface_by_mac(mac_addr) + logger.info("RDMA: network interface found: {0}", if_name) + logger.info("RDMA: bringing network interface up") + if shellutil.run("ifconfig {0} up".format(if_name)) != 0: + raise Exception("Could not bring up RMDA interface: {0}".format(if_name)) + + logger.info("RDMA: configuring IPv4 addr and netmask on interface") + addr = '{0}/{1}'.format(ipv4_addr, netmask) + if shellutil.run("ifconfig {0} {1}".format(if_name, addr)) != 0: + raise Exception("Could set addr to {1} on {0}".format(if_name, addr)) + logger.info("RDMA: network address and netmask configured on interface") + + @staticmethod + def get_interface_by_mac(mac): + ret, output = shellutil.run_get_output("ifconfig -a") + if ret != 0: + raise Exception("Failed to list network interfaces") + output = output.replace('\n', '') + match = re.search(r"(eth\d).*(HWaddr|ether) {0}".format(mac), + output, re.IGNORECASE) + if match is None: + raise Exception("Failed to get ifname with mac: {0}".format(mac)) + output = match.group(0) + eths = re.findall(r"eth\d", output) + if eths is None or len(eths) == 0: + raise Exception("ifname with mac: {0} not found".format(mac)) + return eths[-1] diff --git a/azurelinuxagent/common/utils/__init__.py b/azurelinuxagent/common/utils/__init__.py new file mode 100644 index 0000000..1ea2f38 --- /dev/null +++ b/azurelinuxagent/common/utils/__init__.py @@ -0,0 +1,17 @@ +# Copyright 
2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + diff --git a/azurelinuxagent/common/utils/cryptutil.py b/azurelinuxagent/common/utils/cryptutil.py new file mode 100644 index 0000000..b35bda0 --- /dev/null +++ b/azurelinuxagent/common/utils/cryptutil.py @@ -0,0 +1,121 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import base64 +import struct +from azurelinuxagent.common.future import ustr, bytebuffer +from azurelinuxagent.common.exception import CryptError +import azurelinuxagent.common.utils.shellutil as shellutil + +class CryptUtil(object): + def __init__(self, openssl_cmd): + self.openssl_cmd = openssl_cmd + + def gen_transport_cert(self, prv_file, crt_file): + """ + Create ssl certificate for https communication with endpoint server. 
+ """ + cmd = ("{0} req -x509 -nodes -subj /CN=LinuxTransport -days 32768 " + "-newkey rsa:2048 -keyout {1} " + "-out {2}").format(self.openssl_cmd, prv_file, crt_file) + shellutil.run(cmd) + + def get_pubkey_from_prv(self, file_name): + cmd = "{0} rsa -in {1} -pubout 2>/dev/null".format(self.openssl_cmd, + file_name) + pub = shellutil.run_get_output(cmd)[1] + return pub + + def get_pubkey_from_crt(self, file_name): + cmd = "{0} x509 -in {1} -pubkey -noout".format(self.openssl_cmd, + file_name) + pub = shellutil.run_get_output(cmd)[1] + return pub + + def get_thumbprint_from_crt(self, file_name): + cmd="{0} x509 -in {1} -fingerprint -noout".format(self.openssl_cmd, + file_name) + thumbprint = shellutil.run_get_output(cmd)[1] + thumbprint = thumbprint.rstrip().split('=')[1].replace(':', '').upper() + return thumbprint + + def decrypt_p7m(self, p7m_file, trans_prv_file, trans_cert_file, pem_file): + cmd = ("{0} cms -decrypt -in {1} -inkey {2} -recip {3} " + "| {4} pkcs12 -nodes -password pass: -out {5}" + "").format(self.openssl_cmd, p7m_file, trans_prv_file, + trans_cert_file, self.openssl_cmd, pem_file) + shellutil.run(cmd) + + def crt_to_ssh(self, input_file, output_file): + shellutil.run("ssh-keygen -i -m PKCS8 -f {0} >> {1}".format(input_file, + output_file)) + + def asn1_to_ssh(self, pubkey): + lines = pubkey.split("\n") + lines = [x for x in lines if not x.startswith("----")] + base64_encoded = "".join(lines) + try: + #TODO remove pyasn1 dependency + from pyasn1.codec.der import decoder as der_decoder + der_encoded = base64.b64decode(base64_encoded) + der_encoded = der_decoder.decode(der_encoded)[0][1] + key = der_decoder.decode(self.bits_to_bytes(der_encoded))[0] + n=key[0] + e=key[1] + keydata = bytearray() + keydata.extend(struct.pack('>I', len("ssh-rsa"))) + keydata.extend(b"ssh-rsa") + keydata.extend(struct.pack('>I', len(self.num_to_bytes(e)))) + keydata.extend(self.num_to_bytes(e)) + keydata.extend(struct.pack('>I', len(self.num_to_bytes(n)) + 1)) + keydata.extend(b"\0") + keydata.extend(self.num_to_bytes(n)) + keydata_base64 = base64.b64encode(bytebuffer(keydata)) + return ustr(b"ssh-rsa " + keydata_base64 + b"\n", + encoding='utf-8') + except ImportError as e: + raise CryptError("Failed to load pyasn1.codec.der") + + def num_to_bytes(self, num): + """ + Pack number into bytes. Retun as string. + """ + result = bytearray() + while num: + result.append(num & 0xFF) + num >>= 8 + result.reverse() + return result + + def bits_to_bytes(self, bits): + """ + Convert an array contains bits, [0,1] to a byte array + """ + index = 7 + byte_array = bytearray() + curr = 0 + for bit in bits: + curr = curr | (bit << index) + index = index - 1 + if index == -1: + byte_array.append(curr) + curr = 0 + index = 7 + return bytes(byte_array) + diff --git a/azurelinuxagent/common/utils/fileutil.py b/azurelinuxagent/common/utils/fileutil.py new file mode 100644 index 0000000..7ef4fef --- /dev/null +++ b/azurelinuxagent/common/utils/fileutil.py @@ -0,0 +1,171 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
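A short usage sketch of the CryptUtil helper above. The openssl path and the file names are illustrative; the agent itself takes the openssl location from configuration (the OS.OpensslPath setting) and manages its transport key files under the lib directory.

    from azurelinuxagent.common.utils.cryptutil import CryptUtil

    crypt = CryptUtil("/usr/bin/openssl")    # example path; see OS.OpensslPath
    crypt.gen_transport_cert("TransportPrivate.pem", "TransportCert.pem")
    # Prints the certificate fingerprint with the colons stripped, uppercased.
    print(crypt.get_thumbprint_from_crt("TransportCert.pem"))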
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +""" +File operation util functions +""" + +import os +import re +import shutil +import pwd +import tempfile +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.utils.textutil as textutil + +def copy_file(from_path, to_path=None, to_dir=None): + if to_path is None: + to_path = os.path.join(to_dir, os.path.basename(from_path)) + shutil.copyfile(from_path, to_path) + return to_path + + +def read_file(filepath, asbin=False, remove_bom=False, encoding='utf-8'): + """ + Read and return contents of 'filepath'. + """ + mode = 'rb' + with open(filepath, mode) as in_file: + data = in_file.read() + if data is None: + return None + + if asbin: + return data + + if remove_bom: + #Remove bom on bytes data before it is converted into string. + data = textutil.remove_bom(data) + data = ustr(data, encoding=encoding) + return data + +def write_file(filepath, contents, asbin=False, encoding='utf-8', append=False): + """ + Write 'contents' to 'filepath'. + """ + mode = "ab" if append else "wb" + data = contents + if not asbin: + data = contents.encode(encoding) + with open(filepath, mode) as out_file: + out_file.write(data) + +def append_file(filepath, contents, asbin=False, encoding='utf-8'): + """ + Append 'contents' to 'filepath'. + """ + write_file(filepath, contents, asbin=asbin, encoding=encoding, append=True) + + +def base_name(path): + head, tail = os.path.split(path) + return tail + +def get_line_startingwith(prefix, filepath): + """ + Return line from 'filepath' if the line startswith 'prefix' + """ + for line in read_file(filepath).split('\n'): + if line.startswith(prefix): + return line + return None + +#End File operation util functions + +def mkdir(dirpath, mode=None, owner=None): + if not os.path.isdir(dirpath): + os.makedirs(dirpath) + if mode is not None: + chmod(dirpath, mode) + if owner is not None: + chowner(dirpath, owner) + +def chowner(path, owner): + if not os.path.exists(path): + logger.error("Path does not exist: {0}".format(path)) + else: + owner_info = pwd.getpwnam(owner) + os.chown(path, owner_info[2], owner_info[3]) + +def chmod(path, mode): + if not os.path.exists(path): + logger.error("Path does not exist: {0}".format(path)) + else: + os.chmod(path, mode) + +def rm_files(*args): + for path in args: + if os.path.isfile(path): + os.remove(path) + +def rm_dirs(*args): + """ + Remove all the contents under the directry + """ + for dir_name in args: + if os.path.isdir(dir_name): + for item in os.listdir(dir_name): + path = os.path.join(dir_name, item) + if os.path.isfile(path): + os.remove(path) + elif os.path.isdir(path): + shutil.rmtree(path) + +def trim_ext(path, ext): + if not ext.startswith("."): + ext = "." 
+ ext + return path.split(ext)[0] if path.endswith(ext) else path + +def update_conf_file(path, line_start, val, chk_err=False): + conf = [] + if not os.path.isfile(path) and chk_err: + raise IOError("Can't find config file:{0}".format(path)) + conf = read_file(path).split('\n') + conf = [x for x in conf if not x.startswith(line_start)] + conf.append(val) + write_file(path, '\n'.join(conf)) + +def search_file(target_dir_name, target_file_name): + for root, dirs, files in os.walk(target_dir_name): + for file_name in files: + if file_name == target_file_name: + return os.path.join(root, file_name) + return None + +def chmod_tree(path, mode): + for root, dirs, files in os.walk(path): + for file_name in files: + os.chmod(os.path.join(root, file_name), mode) + +def findstr_in_file(file_path, pattern_str): + """ + Return match object if found in file. + """ + try: + pattern = re.compile(pattern_str) + for line in (open(file_path, 'r')).readlines(): + match = re.search(pattern, line) + if match: + return match + except: + raise + + return None + diff --git a/azurelinuxagent/common/utils/flexible_version.py b/azurelinuxagent/common/utils/flexible_version.py new file mode 100644 index 0000000..2fce88d --- /dev/null +++ b/azurelinuxagent/common/utils/flexible_version.py @@ -0,0 +1,199 @@ +from distutils import version +import re + +class FlexibleVersion(version.Version): + """ + A more flexible implementation of distutils.version.StrictVersion + + The implementation allows to specify: + - an arbitrary number of version numbers: + not only '1.2.3' , but also '1.2.3.4.5' + - the separator between version numbers: + '1-2-3' is allowed when '-' is specified as separator + - a flexible pre-release separator: + '1.2.3.alpha1', '1.2.3-alpha1', and '1.2.3alpha1' are considered equivalent + - an arbitrary ordering of pre-release tags: + 1.1alpha3 < 1.1beta2 < 1.1rc1 < 1.1 + when ["alpha", "beta", "rc"] is specified as pre-release tag list + + Inspiration from this discussion at StackOverflow: + http://stackoverflow.com/questions/12255554/sort-versions-in-python + """ + + def __init__(self, vstring=None, sep='.', prerel_tags=('alpha', 'beta', 'rc')): + version.Version.__init__(self) + + if sep is None: + sep = '.' 
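The ordering promised in the FlexibleVersion docstring can be checked directly; a small sketch, assuming the default '.' separator and the default ('alpha', 'beta', 'rc') pre-release tags:

    from azurelinuxagent.common.utils.flexible_version import FlexibleVersion

    assert FlexibleVersion("1.1alpha3") < FlexibleVersion("1.1beta2") < FlexibleVersion("1.1rc1") < FlexibleVersion("1.1")
    assert FlexibleVersion("1.2") == FlexibleVersion("1.2.0.0")   # shorter versions are zero-padded for comparison
    print(FlexibleVersion("2.1.5") + 1)                           # -> 2.1.6 (increments the final numeric component)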
+ if prerel_tags is None: + prerel_tags = () + + self.sep = sep + self.prerel_sep = '' + self.prerel_tags = tuple(prerel_tags) if prerel_tags is not None else () + + self._compile_pattern() + + self.prerelease = None + self.version = () + if vstring: + self._parse(vstring) + return + + _nn_version = 'version' + _nn_prerel_sep = 'prerel_sep' + _nn_prerel_tag = 'tag' + _nn_prerel_num = 'tag_num' + + _re_prerel_sep = r'(?P<{pn}>{sep})?'.format( + pn=_nn_prerel_sep, + sep='|'.join(map(re.escape, ('.', '-')))) + + @property + def major(self): + return self.version[0] if len(self.version) > 0 else 0 + + @property + def minor(self): + return self.version[1] if len(self.version) > 1 else 0 + + @property + def patch(self): + return self.version[2] if len(self.version) > 2 else 0 + + def _parse(self, vstring): + m = self.version_re.match(vstring) + if not m: + raise ValueError("Invalid version number '{0}'".format(vstring)) + + self.prerelease = None + self.version = () + + self.prerel_sep = m.group(self._nn_prerel_sep) + tag = m.group(self._nn_prerel_tag) + tag_num = m.group(self._nn_prerel_num) + + if tag is not None and tag_num is not None: + self.prerelease = (tag, int(tag_num) if len(tag_num) else None) + + self.version = tuple(map(int, self.sep_re.split(m.group(self._nn_version)))) + return + + def __add__(self, increment): + version = list(self.version) + version[-1] += increment + vstring = self._assemble(version, self.sep, self.prerel_sep, self.prerelease) + return FlexibleVersion(vstring=vstring, sep=self.sep, prerel_tags=self.prerel_tags) + + def __sub__(self, decrement): + version = list(self.version) + if version[-1] <= 0: + raise ArithmeticError("Cannot decrement final numeric component of {0} below zero" \ + .format(self)) + version[-1] -= decrement + vstring = self._assemble(version, self.sep, self.prerel_sep, self.prerelease) + return FlexibleVersion(vstring=vstring, sep=self.sep, prerel_tags=self.prerel_tags) + + def __repr__(self): + return "{cls} ('{vstring}', '{sep}', {prerel_tags})"\ + .format( + cls=self.__class__.__name__, + vstring=str(self), + sep=self.sep, + prerel_tags=self.prerel_tags) + + def __str__(self): + return self._assemble(self.version, self.sep, self.prerel_sep, self.prerelease) + + def __ge__(self, that): + return not self.__lt__(that) + + def __gt__(self, that): + return (not self.__lt__(that)) and (not self.__eq__(that)) + + def __le__(self, that): + return (self.__lt__(that)) or (self.__eq__(that)) + + def __lt__(self, that): + this_version, that_version = self._ensure_compatible(that) + + if this_version != that_version \ + or self.prerelease is None and that.prerelease is None: + return this_version < that_version + + if self.prerelease is not None and that.prerelease is None: + return True + if self.prerelease is None and that.prerelease is not None: + return False + + this_index = self.prerel_tags_set[self.prerelease[0]] + that_index = self.prerel_tags_set[that.prerelease[0]] + if this_index == that_index: + return self.prerelease[1] < that.prerelease[1] + + return this_index < that_index + + def __ne__(self, that): + return not self.__eq__(that) + + def __eq__(self, that): + this_version, that_version = self._ensure_compatible(that) + + if this_version != that_version: + return False + + if self.prerelease != that.prerelease: + return False + + return True + + def _assemble(self, version, sep, prerel_sep, prerelease): + s = sep.join(map(str, version)) + if prerelease is not None: + if prerel_sep is not None: + s += prerel_sep + s += prerelease[0] + if 
prerelease[1] is not None: + s += str(prerelease[1]) + return s + + def _compile_pattern(self): + sep, self.sep_re = self._compile_separator(self.sep) + + if self.prerel_tags: + tags = '|'.join(re.escape(tag) for tag in self.prerel_tags) + self.prerel_tags_set = dict(zip(self.prerel_tags, range(len(self.prerel_tags)))) + release_re = '(?:{prerel_sep}(?P<{tn}>{tags})(?P<{nn}>\d*))?'.format( + prerel_sep=self._re_prerel_sep, + tags=tags, + tn=self._nn_prerel_tag, + nn=self._nn_prerel_num) + else: + release_re = '' + + version_re = r'^(?P<{vn}>\d+(?:(?:{sep}\d+)*)?){rel}$'.format( + vn=self._nn_version, + sep=sep, + rel=release_re) + self.version_re = re.compile(version_re) + return + + def _compile_separator(self, sep): + if sep is None: + return '', re.compile('') + return re.escape(sep), re.compile(re.escape(sep)) + + def _ensure_compatible(self, that): + """ + Ensures the instances have the same structure and, if so, returns length compatible + version lists (so that x.y.0.0 is equivalent to x.y). + """ + if self.prerel_tags != that.prerel_tags or self.sep != that.sep: + raise ValueError("Unable to compare: versions have different structures") + + this_version = list(self.version[:]) + that_version = list(that.version[:]) + while len(this_version) < len(that_version): this_version.append(0) + while len(that_version) < len(this_version): that_version.append(0) + + return this_version, that_version diff --git a/azurelinuxagent/common/utils/restutil.py b/azurelinuxagent/common/utils/restutil.py new file mode 100644 index 0000000..a789650 --- /dev/null +++ b/azurelinuxagent/common/utils/restutil.py @@ -0,0 +1,156 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import time +import platform +import os +import subprocess +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.exception import HttpError +from azurelinuxagent.common.future import httpclient, urlparse + +""" +REST api util functions +""" + +RETRY_WAITING_INTERVAL = 10 + +def _parse_url(url): + o = urlparse(url) + rel_uri = o.path + if o.fragment: + rel_uri = "{0}#{1}".format(rel_uri, o.fragment) + if o.query: + rel_uri = "{0}?{1}".format(rel_uri, o.query) + secure = False + if o.scheme.lower() == "https": + secure = True + return o.hostname, o.port, secure, rel_uri + +def get_http_proxy(): + """ + Get http_proxy and https_proxy from environment variables. + Username and password is not supported now. 
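A quick illustration of what _parse_url() above returns; the URL is a made-up example, and _parse_url is a module-private helper shown here only to clarify the (host, port, secure, relative URI) split that _http_request() consumes:

    from azurelinuxagent.common.utils.restutil import _parse_url

    print(_parse_url("https://fabric.example.invalid:8443/machine?comp=goalstate"))
    # -> ('fabric.example.invalid', 8443, True, '/machine?comp=goalstate')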
+ """ + host = conf.get_httpproxy_host() + port = conf.get_httpproxy_port() + return (host, port) + +def _http_request(method, host, rel_uri, port=None, data=None, secure=False, + headers=None, proxy_host=None, proxy_port=None): + url, conn = None, None + if secure: + port = 443 if port is None else port + if proxy_host is not None and proxy_port is not None: + conn = httpclient.HTTPSConnection(proxy_host, proxy_port, timeout=10) + conn.set_tunnel(host, port) + #If proxy is used, full url is needed. + url = "https://{0}:{1}{2}".format(host, port, rel_uri) + else: + conn = httpclient.HTTPSConnection(host, port, timeout=10) + url = rel_uri + else: + port = 80 if port is None else port + if proxy_host is not None and proxy_port is not None: + conn = httpclient.HTTPConnection(proxy_host, proxy_port, timeout=10) + #If proxy is used, full url is needed. + url = "http://{0}:{1}{2}".format(host, port, rel_uri) + else: + conn = httpclient.HTTPConnection(host, port, timeout=10) + url = rel_uri + if headers == None: + conn.request(method, url, data) + else: + conn.request(method, url, data, headers) + resp = conn.getresponse() + return resp + +def http_request(method, url, data, headers=None, max_retry=3, chk_proxy=False): + """ + Sending http request to server + On error, sleep 10 and retry max_retry times. + """ + logger.verbose("HTTP Req: {0} {1}", method, url) + logger.verbose(" Data={0}", data) + logger.verbose(" Header={0}", headers) + host, port, secure, rel_uri = _parse_url(url) + + #Check proxy + proxy_host, proxy_port = (None, None) + if chk_proxy: + proxy_host, proxy_port = get_http_proxy() + + #If httplib module is not built with ssl support. Fallback to http + if secure and not hasattr(httpclient, "HTTPSConnection"): + logger.warn("httplib is not built with ssl support") + secure = False + + #If httplib module doesn't support https tunnelling. 
Fallback to http + if secure and \ + proxy_host is not None and \ + proxy_port is not None and \ + not hasattr(httpclient.HTTPSConnection, "set_tunnel"): + logger.warn("httplib doesn't support https tunnelling(new in python 2.7)") + secure = False + + for retry in range(0, max_retry): + try: + resp = _http_request(method, host, rel_uri, port=port, data=data, + secure=secure, headers=headers, + proxy_host=proxy_host, proxy_port=proxy_port) + logger.verbose("HTTP Resp: Status={0}", resp.status) + logger.verbose(" Header={0}", resp.getheaders()) + return resp + except httpclient.HTTPException as e: + logger.warn('HTTPException {0}, args:{1}', e, repr(e.args)) + except IOError as e: + logger.warn('Socket IOError {0}, args:{1}', e, repr(e.args)) + + if retry < max_retry - 1: + logger.info("Retry={0}, {1} {2}", retry, method, url) + time.sleep(RETRY_WAITING_INTERVAL) + + if url is not None and len(url) > 100: + url_log = url[0: 100] #In case the url is too long + else: + url_log = url + raise HttpError("HTTP Err: {0} {1}".format(method, url_log)) + +def http_get(url, headers=None, max_retry=3, chk_proxy=False): + return http_request("GET", url, data=None, headers=headers, + max_retry=max_retry, chk_proxy=chk_proxy) + +def http_head(url, headers=None, max_retry=3, chk_proxy=False): + return http_request("HEAD", url, None, headers=headers, + max_retry=max_retry, chk_proxy=chk_proxy) + +def http_post(url, data, headers=None, max_retry=3, chk_proxy=False): + return http_request("POST", url, data, headers=headers, + max_retry=max_retry, chk_proxy=chk_proxy) + +def http_put(url, data, headers=None, max_retry=3, chk_proxy=False): + return http_request("PUT", url, data, headers=headers, + max_retry=max_retry, chk_proxy=chk_proxy) + +def http_delete(url, headers=None, max_retry=3, chk_proxy=False): + return http_request("DELETE", url, None, headers=headers, + max_retry=max_retry, chk_proxy=chk_proxy) + +#End REST api util functions diff --git a/azurelinuxagent/common/utils/shellutil.py b/azurelinuxagent/common/utils/shellutil.py new file mode 100644 index 0000000..d273c92 --- /dev/null +++ b/azurelinuxagent/common/utils/shellutil.py @@ -0,0 +1,107 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
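A usage sketch of the verb helpers above. The URL is a stand-in (real callers pass wire-server or extension blob URLs), and an unreachable host surfaces as HttpError once the retries described above are exhausted:

    from azurelinuxagent.common.utils.restutil import http_get

    resp = http_get("http://fabric.example.invalid/machine?comp=goalstate", chk_proxy=False)
    print(resp.status, resp.getheaders())   # httplib response; status and headers as logged above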
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import platform +import os +import subprocess +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.logger as logger + +if not hasattr(subprocess,'check_output'): + def check_output(*popenargs, **kwargs): + r"""Backport from subprocess module from python 2.7""" + if 'stdout' in kwargs: + raise ValueError('stdout argument not allowed, ' + 'it will be overridden.') + process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) + output, unused_err = process.communicate() + retcode = process.poll() + if retcode: + cmd = kwargs.get("args") + if cmd is None: + cmd = popenargs[0] + raise subprocess.CalledProcessError(retcode, cmd, output=output) + return output + + # Exception classes used by this module. + class CalledProcessError(Exception): + def __init__(self, returncode, cmd, output=None): + self.returncode = returncode + self.cmd = cmd + self.output = output + def __str__(self): + return ("Command '{0}' returned non-zero exit status {1}" + "").format(self.cmd, self.returncode) + + subprocess.check_output=check_output + subprocess.CalledProcessError=CalledProcessError + + +""" +Shell command util functions +""" +def run(cmd, chk_err=True): + """ + Calls run_get_output on 'cmd', returning only the return code. + If chk_err=True then errors will be reported in the log. + If chk_err=False then errors will be suppressed from the log. + """ + retcode,out=run_get_output(cmd,chk_err) + return retcode + +def run_get_output(cmd, chk_err=True, log_cmd=True): + """ + Wrapper for subprocess.check_output. + Execute 'cmd'. Returns return code and STDOUT, trapping expected exceptions. + Reports exceptions to Error if chk_err parameter is True + """ + if log_cmd: + logger.verbose(u"run cmd '{0}'", cmd) + try: + output=subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True) + output = ustr(output, encoding='utf-8', errors="backslashreplace") + except subprocess.CalledProcessError as e : + output = ustr(e.output, encoding='utf-8', errors="backslashreplace") + if chk_err: + if log_cmd: + logger.error(u"run cmd '{0}' failed", e.cmd) + logger.error(u"Error Code:{0}", e.returncode) + logger.error(u"Result:{0}", output) + return e.returncode, output + return 0, output + + +def quote(word_list): + """ + Quote a list or tuple of strings for Unix Shell as words, using the + byte-literal single quote. + + The resulting string is safe for use with ``shell=True`` in ``subprocess``, + and in ``os.system``. ``assert shlex.split(ShellQuote(wordList)) == wordList``. + + See POSIX.1:2013 Vol 3, Chap 2, Sec 2.2.2: + http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02_02 + """ + if not isinstance(word_list, (tuple, list)): + word_list = (word_list,) + + return " ".join(list("'{0}'".format(s.replace("'", "'\\''")) for s in word_list)) + + +# End shell command util functions diff --git a/azurelinuxagent/common/utils/textutil.py b/azurelinuxagent/common/utils/textutil.py new file mode 100644 index 0000000..f03c7e6 --- /dev/null +++ b/azurelinuxagent/common/utils/textutil.py @@ -0,0 +1,279 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
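A short usage sketch of the shell helpers above; the commands are arbitrary examples:

    from azurelinuxagent.common.utils import shellutil

    exit_code = shellutil.run("uname -r", chk_err=False)      # exit code only
    code, out = shellutil.run_get_output("uname -r")          # exit code plus captured stdout
    print(exit_code, code, out.strip())
    print(shellutil.quote(["echo", "it's quoted"]))           # -> 'echo' 'it'\''s quoted'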
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ + +import base64 +import crypt +import random +import string +import struct +import sys +import xml.dom.minidom as minidom + +from distutils.version import LooseVersion as Version + + +def parse_doc(xml_text): + """ + Parse xml document from string + """ + # The minidom lib has some issue with unicode in python2. + # Encode the string into utf-8 first + xml_text = xml_text.encode('utf-8') + return minidom.parseString(xml_text) + + +def findall(root, tag, namespace=None): + """ + Get all nodes by tag and namespace under Node root. + """ + if root is None: + return [] + + if namespace is None: + return root.getElementsByTagName(tag) + else: + return root.getElementsByTagNameNS(namespace, tag) + + +def find(root, tag, namespace=None): + """ + Get first node by tag and namespace under Node root. + """ + nodes = findall(root, tag, namespace=namespace) + if nodes is not None and len(nodes) >= 1: + return nodes[0] + else: + return None + + +def gettext(node): + """ + Get node text + """ + if node is None: + return None + + for child in node.childNodes: + if child.nodeType == child.TEXT_NODE: + return child.data + return None + + +def findtext(root, tag, namespace=None): + """ + Get text of node by tag and namespace under Node root. + """ + node = find(root, tag, namespace=namespace) + return gettext(node) + + +def getattrib(node, attr_name): + """ + Get attribute of xml node + """ + if node is not None: + return node.getAttribute(attr_name) + else: + return None + + +def unpack(buf, offset, range): + """ + Unpack bytes into python values. + """ + result = 0 + for i in range: + result = (result << 8) | str_to_ord(buf[offset + i]) + return result + + +def unpack_little_endian(buf, offset, length): + """ + Unpack little endian bytes into python values. + """ + return unpack(buf, offset, list(range(length - 1, -1, -1))) + + +def unpack_big_endian(buf, offset, length): + """ + Unpack big endian bytes into python values. + """ + return unpack(buf, offset, list(range(0, length))) + + +def hex_dump3(buf, offset, length): + """ + Dump range of buf in formatted hex. + """ + return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]]) + + +def hex_dump2(buf): + """ + Dump buf in formatted hex. + """ + return hex_dump3(buf, 0, len(buf)) + + +def is_in_range(a, low, high): + """ + Return True if 'a' in 'low' <= a >= 'high' + """ + return (a >= low and a <= high) + + +def is_printable(ch): + """ + Return True if character is displayable. + """ + return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z')) + or is_in_range(ch, str_to_ord('a'), str_to_ord('z')) + or is_in_range(ch, str_to_ord('0'), str_to_ord('9'))) + + +def hex_dump(buffer, size): + """ + Return Hex formated dump of a 'buffer' of 'size'. 
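The DOM helpers above are easiest to see on a tiny document; the XML below is invented for illustration:

    from azurelinuxagent.common.utils.textutil import parse_doc, find, findtext, getattrib

    doc = parse_doc('<Instance id="vm-0"><State>Started</State></Instance>')
    inst = find(doc, "Instance")
    print(getattrib(inst, "id"), findtext(doc, "State"))   # -> vm-0 Started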
+ """ + if size < 0: + size = len(buffer) + result = "" + for i in range(0, size): + if (i % 16) == 0: + result += "%06X: " % i + byte = buffer[i] + if type(byte) == str: + byte = ord(byte.decode('latin1')) + result += "%02X " % byte + if (i & 15) == 7: + result += " " + if ((i + 1) % 16) == 0 or (i + 1) == size: + j = i + while ((j + 1) % 16) != 0: + result += " " + if (j & 7) == 7: + result += " " + j += 1 + result += " " + for j in range(i - (i % 16), i + 1): + byte = buffer[j] + if type(byte) == str: + byte = str_to_ord(byte.decode('latin1')) + k = '.' + if is_printable(byte): + k = chr(byte) + result += k + if (i + 1) != size: + result += "\n" + return result + + +def str_to_ord(a): + """ + Allows indexing into a string or an array of integers transparently. + Generic utility function. + """ + if type(a) == type(b'') or type(a) == type(u''): + a = ord(a) + return a + + +def compare_bytes(a, b, start, length): + for offset in range(start, start + length): + if str_to_ord(a[offset]) != str_to_ord(b[offset]): + return False + return True + + +def int_to_ip4_addr(a): + """ + Build DHCP request string. + """ + return "%u.%u.%u.%u" % ((a >> 24) & 0xFF, + (a >> 16) & 0xFF, + (a >> 8) & 0xFF, + (a) & 0xFF) + + +def hexstr_to_bytearray(a): + """ + Return hex string packed into a binary struct. + """ + b = b"" + for c in range(0, len(a) // 2): + b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16)) + return b + + +def set_ssh_config(config, name, val): + notfound = True + for i in range(0, len(config)): + if config[i].startswith(name): + config[i] = "{0} {1}".format(name, val) + notfound = False + elif config[i].startswith("Match"): + # Match block must be put in the end of sshd config + break + if notfound: + config.insert(i, "{0} {1}".format(name, val)) + return config + + +def set_ini_config(config, name, val): + notfound = True + nameEqual = name + '=' + length = len(config) + text = "{0}=\"{1}\"".format(name, val) + + for i in reversed(range(0, length)): + if config[i].startswith(nameEqual): + config[i] = text + notfound = False + break + + if notfound: + config.insert(length - 1, text) + + +def remove_bom(c): + if str_to_ord(c[0]) > 128 and str_to_ord(c[1]) > 128 and \ + str_to_ord(c[2]) > 128: + c = c[3:] + return c + + +def gen_password_hash(password, crypt_id, salt_len): + collection = string.ascii_letters + string.digits + salt = ''.join(random.choice(collection) for _ in range(salt_len)) + salt = "${0}${1}".format(crypt_id, salt) + return crypt.crypt(password, salt) + + +def get_bytes_from_pem(pem_str): + base64_bytes = "" + for line in pem_str.split('\n'): + if "----" not in line: + base64_bytes += line + return base64_bytes + + +def b64encode(s): + from azurelinuxagent.common.version import PY_VERSION_MAJOR + if PY_VERSION_MAJOR > 2: + return base64.b64encode(bytes(s, 'utf-8')).decode('utf-8') + return base64.b64encode(s) diff --git a/azurelinuxagent/common/version.py b/azurelinuxagent/common/version.py new file mode 100644 index 0000000..6c4b475 --- /dev/null +++ b/azurelinuxagent/common/version.py @@ -0,0 +1,116 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
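Two of the helpers above in action; the sshd_config lines and the password are invented for illustration (crypt id 6 selects SHA-512 hashing, which matches the agent's Provisioning.PasswordCryptId default of "6"):

    from azurelinuxagent.common.utils.textutil import set_ssh_config, gen_password_hash

    lines = ["PasswordAuthentication yes", "Match User foo"]
    # New options are inserted before the first 'Match' block, which sshd requires to be last.
    print(set_ssh_config(lines, "ClientAliveInterval", "180"))
    # -> ['PasswordAuthentication yes', 'ClientAliveInterval 180', 'Match User foo']

    print(gen_password_hash("Passw0rd!", 6, 10))   # -> '$6$<random salt>$<hash>'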
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import re +import platform +import sys + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion +from azurelinuxagent.common.future import ustr + + +def get_distro(): + if 'FreeBSD' in platform.system(): + release = re.sub('\-.*\Z', '', ustr(platform.release())) + osinfo = ['freebsd', release, '', 'freebsd'] + elif 'linux_distribution' in dir(platform): + osinfo = list(platform.linux_distribution(full_distribution_name=0)) + full_name = platform.linux_distribution()[0].strip() + osinfo.append(full_name) + else: + osinfo = platform.dist() + + # The platform.py lib has issue with detecting oracle linux distribution. + # Merge the following patch provided by oracle as a temparory fix. + if os.path.exists("/etc/oracle-release"): + osinfo[2] = "oracle" + osinfo[3] = "Oracle Linux" + + # Remove trailing whitespace and quote in distro name + osinfo[0] = osinfo[0].strip('"').strip(' ').lower() + return osinfo + + +AGENT_NAME = "WALinuxAgent" +AGENT_LONG_NAME = "Azure Linux Agent" +AGENT_VERSION = '2.1.5' +AGENT_LONG_VERSION = "{0}-{1}".format(AGENT_NAME, AGENT_VERSION) +AGENT_DESCRIPTION = """\ +The Azure Linux Agent supports the provisioning and running of Linux +VMs in the Azure cloud. This package should be installed on Linux disk +images that are built to run in the Azure environment. +""" + +AGENT_DIR_GLOB = "{0}-*".format(AGENT_NAME) +AGENT_PKG_GLOB = "{0}-*.zip".format(AGENT_NAME) + +AGENT_PATTERN = "{0}-(.*)".format(AGENT_NAME) +AGENT_NAME_PATTERN = re.compile(AGENT_PATTERN) +AGENT_DIR_PATTERN = re.compile(".*/{0}".format(AGENT_PATTERN)) + + +# Set the CURRENT_AGENT and CURRENT_VERSION to match the agent directory name +# - This ensures the agent will "see itself" using the same name and version +# as the code that downloads agents. 
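To make the naming convention behind these constants concrete: an agent's directory name encodes its version, and the version is recovered from that name with AGENT_NAME_PATTERN. The directory name below is an example assembled from the values above:

    import re

    AGENT_NAME_PATTERN = re.compile("WALinuxAgent-(.*)")              # same pattern as above
    print(AGENT_NAME_PATTERN.match("WALinuxAgent-2.1.5").group(1))    # -> 2.1.5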
+def set_current_agent(): + path = os.getcwd() + lib_dir = conf.get_lib_dir() + if lib_dir[-1] != os.path.sep: + lib_dir += os.path.sep + if path[:len(lib_dir)] != lib_dir: + agent = AGENT_LONG_VERSION + version = AGENT_VERSION + else: + agent = path[len(lib_dir):].split(os.path.sep)[0] + version = AGENT_NAME_PATTERN.match(agent).group(1) + return agent, FlexibleVersion(version) +CURRENT_AGENT, CURRENT_VERSION = set_current_agent() + +def is_current_agent_installed(): + return CURRENT_AGENT == AGENT_LONG_VERSION + + +__distro__ = get_distro() +DISTRO_NAME = __distro__[0] +DISTRO_VERSION = __distro__[1] +DISTRO_CODE_NAME = __distro__[2] +DISTRO_FULL_NAME = __distro__[3] + +PY_VERSION = sys.version_info +PY_VERSION_MAJOR = sys.version_info[0] +PY_VERSION_MINOR = sys.version_info[1] +PY_VERSION_MICRO = sys.version_info[2] + +""" +Add this workaround for detecting Snappy Ubuntu Core temporarily, until ubuntu +fixed this bug: https://bugs.launchpad.net/snappy/+bug/1481086 +""" + + +def is_snappy(): + if os.path.exists("/etc/motd"): + motd = fileutil.read_file("/etc/motd") + if "snappy" in motd: + return True + return False + + +if is_snappy(): + DISTRO_FULL_NAME = "Snappy Ubuntu Core" diff --git a/azurelinuxagent/conf.py b/azurelinuxagent/conf.py deleted file mode 100644 index 7921e79..0000000 --- a/azurelinuxagent/conf.py +++ /dev/null @@ -1,166 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -""" -Module conf loads and parses configuration file -""" -import os -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.exception import AgentConfigError - -class ConfigurationProvider(object): - """ - Parse amd store key:values in /etc/waagent.conf. 
- """ - def __init__(self): - self.values = dict() - - def load(self, content): - if not content: - raise AgentConfigError("Can't not parse empty configuration") - for line in content.split('\n'): - if not line.startswith("#") and "=" in line: - parts = line.split()[0].split('=') - value = parts[1].strip("\" ") - if value != "None": - self.values[parts[0]] = value - else: - self.values[parts[0]] = None - - def get(self, key, default_val): - val = self.values.get(key) - return val if val is not None else default_val - - def get_switch(self, key, default_val): - val = self.values.get(key) - if val is not None and val.lower() == 'y': - return True - elif val is not None and val.lower() == 'n': - return False - return default_val - - def get_int(self, key, default_val): - try: - return int(self.values.get(key)) - except TypeError: - return default_val - except ValueError: - return default_val - - -__conf__ = ConfigurationProvider() - -def load_conf_from_file(conf_file_path, conf=__conf__): - """ - Load conf file from: conf_file_path - """ - if os.path.isfile(conf_file_path) == False: - raise AgentConfigError(("Missing configuration in {0}" - "").format(conf_file_path)) - try: - content = fileutil.read_file(conf_file_path) - conf.load(content) - except IOError as err: - raise AgentConfigError(("Failed to load conf file:{0}, {1}" - "").format(conf_file_path, err)) - -def get_logs_verbose(conf=__conf__): - return conf.get_switch("Logs.Verbose", False) - -def get_lib_dir(conf=__conf__): - return conf.get("Lib.Dir", "/var/lib/waagent") - -def get_dvd_mount_point(conf=__conf__): - return conf.get("DVD.MountPoint", "/mnt/cdrom/secure") - -def get_agent_pid_file_path(conf=__conf__): - return conf.get("Pid.File", "/var/run/waagent.pid") - -def get_ext_log_dir(conf=__conf__): - return conf.get("Extension.LogDir", "/var/log/azure") - -def get_openssl_cmd(conf=__conf__): - return conf.get("OS.OpensslPath", "/usr/bin/openssl") - -def get_home_dir(conf=__conf__): - return conf.get("OS.HomeDir", "/home") - -def get_passwd_file_path(conf=__conf__): - return conf.get("OS.PasswordPath", "/etc/shadow") - -def get_sshd_conf_file_path(conf=__conf__): - return conf.get("OS.SshdConfigPath", "/etc/ssh/sshd_config") - -def get_root_device_scsi_timeout(conf=__conf__): - return conf.get("OS.RootDeviceScsiTimeout", None) - -def get_ssh_host_keypair_type(conf=__conf__): - return conf.get("Provisioning.SshHostKeyPairType", "rsa") - -def get_provision_enabled(conf=__conf__): - return conf.get_switch("Provisioning.Enabled", True) - -def get_allow_reset_sys_user(conf=__conf__): - return conf.get_switch("Provisioning.AllowResetSysUser", False) - -def get_regenerate_ssh_host_key(conf=__conf__): - return conf.get_switch("Provisioning.RegenerateSshHostKeyPair", False) - -def get_delete_root_password(conf=__conf__): - return conf.get_switch("Provisioning.DeleteRootPassword", False) - -def get_decode_customdata(conf=__conf__): - return conf.get_switch("Provisioning.DecodeCustomData", False) - -def get_execute_customdata(conf=__conf__): - return conf.get_switch("Provisioning.ExecuteCustomData", False) - -def get_password_cryptid(conf=__conf__): - return conf.get("Provisioning.PasswordCryptId", "6") - -def get_password_crypt_salt_len(conf=__conf__): - return conf.get_int("Provisioning.PasswordCryptSaltLength", 10) - -def get_monitor_hostname(conf=__conf__): - return conf.get_switch("Provisioning.MonitorHostName", False) - -def get_httpproxy_host(conf=__conf__): - return conf.get("HttpProxy.Host", None) - -def 
get_httpproxy_port(conf=__conf__): - return conf.get("HttpProxy.Port", None) - -def get_detect_scvmm_env(conf=__conf__): - return conf.get_switch("DetectScvmmEnv", False) - -def get_resourcedisk_format(conf=__conf__): - return conf.get_switch("ResourceDisk.Format", False) - -def get_resourcedisk_enable_swap(conf=__conf__): - return conf.get_switch("ResourceDisk.EnableSwap", False) - -def get_resourcedisk_mountpoint(conf=__conf__): - return conf.get("ResourceDisk.MountPoint", "/mnt/resource") - -def get_resourcedisk_filesystem(conf=__conf__): - return conf.get("ResourceDisk.Filesystem", "ext3") - -def get_resourcedisk_swap_size_mb(conf=__conf__): - return conf.get_int("ResourceDisk.SwapSizeMB", 0) - diff --git a/azurelinuxagent/daemon/__init__.py b/azurelinuxagent/daemon/__init__.py new file mode 100644 index 0000000..979e01b --- /dev/null +++ b/azurelinuxagent/daemon/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.daemon.main import get_daemon_handler diff --git a/azurelinuxagent/daemon/main.py b/azurelinuxagent/daemon/main.py new file mode 100644 index 0000000..d3185a1 --- /dev/null +++ b/azurelinuxagent/daemon/main.py @@ -0,0 +1,130 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
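For reference, all of the getters above read plain key=value lines from the waagent configuration file; a minimal sketch of that parsing, mirroring ConfigurationProvider.load() as shown (the keys appear above, the values are illustrative):

    content = 'ResourceDisk.Format=y\nResourceDisk.SwapSizeMB=2048\nHttpProxy.Host=None\n'
    values = {}
    for line in content.split('\n'):
        if not line.startswith("#") and "=" in line:
            parts = line.split()[0].split('=')
            value = parts[1].strip('" ')
            values[parts[0]] = None if value == "None" else value
    print(values)   # -> {'ResourceDisk.Format': 'y', 'ResourceDisk.SwapSizeMB': '2048', 'HttpProxy.Host': None}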
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import sys +import time +import traceback + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.event as event +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.logger as logger + +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.event import add_event, WALAEventOperation +from azurelinuxagent.common.exception import ProtocolError +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.common.rdma import RDMADeviceHandler, setup_rdma_device +from azurelinuxagent.common.utils.textutil import parse_doc, find, getattrib +from azurelinuxagent.common.version import AGENT_LONG_NAME, AGENT_VERSION, \ + DISTRO_NAME, DISTRO_VERSION, \ + DISTRO_FULL_NAME, PY_VERSION_MAJOR, \ + PY_VERSION_MINOR, PY_VERSION_MICRO +from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler +from azurelinuxagent.daemon.scvmm import get_scvmm_handler +from azurelinuxagent.pa.provision import get_provision_handler +from azurelinuxagent.pa.rdma import get_rdma_handler +from azurelinuxagent.ga.update import get_update_handler + +def get_daemon_handler(): + return DaemonHandler() + +class DaemonHandler(object): + """ + Main thread of daemon. It will invoke other threads to do actual work + """ + def __init__(self): + self.running = True + self.osutil = get_osutil() + + def run(self): + logger.info("{0} Version:{1}", AGENT_LONG_NAME, AGENT_VERSION) + logger.info("OS: {0} {1}", DISTRO_NAME, DISTRO_VERSION) + logger.info("Python: {0}.{1}.{2}", PY_VERSION_MAJOR, PY_VERSION_MINOR, + PY_VERSION_MICRO) + + self.check_pid() + + while self.running: + try: + self.daemon() + except Exception as e: + err_msg = traceback.format_exc() + add_event("WALA", is_success=False, message=ustr(err_msg), + op=WALAEventOperation.UnhandledError) + logger.info("Sleep 15 seconds and restart daemon") + time.sleep(15) + + + def check_pid(self): + """Check whether daemon is already running""" + pid = None + pid_file = conf.get_agent_pid_file_path() + if os.path.isfile(pid_file): + pid = fileutil.read_file(pid_file) + + if self.osutil.check_pid_alive(pid): + logger.info("Daemon is already running: {0}", pid) + sys.exit(0) + + fileutil.write_file(pid_file, ustr(os.getpid())) + + def daemon(self): + logger.info("Run daemon") + + self.protocol_util = get_protocol_util() + self.scvmm_handler = get_scvmm_handler() + self.resourcedisk_handler = get_resourcedisk_handler() + self.rdma_handler = get_rdma_handler() + self.provision_handler = get_provision_handler() + self.update_handler = get_update_handler() + + # Create lib dir + if not os.path.isdir(conf.get_lib_dir()): + fileutil.mkdir(conf.get_lib_dir(), mode=0o700) + os.chdir(conf.get_lib_dir()) + + if conf.get_detect_scvmm_env(): + self.scvmm_handler.run() + + if conf.get_resourcedisk_format(): + self.resourcedisk_handler.run() + + # Always redetermine the protocol start (e.g., wireserver vs. 
+ # on-premise) since a VHD can move between environments + self.protocol_util.clear_protocol() + + self.provision_handler.run() + + # Enable RDMA, continue in errors + if conf.enable_rdma(): + self.rdma_handler.install_driver() + + logger.info("RDMA capabilities are enabled in configuration") + try: + setup_rdma_device() + except Exception as e: + logger.error("Error setting up rdma device: %s" % e) + else: + logger.info("RDMA capabilities are not enabled, skipping") + + while self.running: + self.update_handler.run_latest() diff --git a/azurelinuxagent/daemon/resourcedisk/__init__.py b/azurelinuxagent/daemon/resourcedisk/__init__.py new file mode 100644 index 0000000..021cecd --- /dev/null +++ b/azurelinuxagent/daemon/resourcedisk/__init__.py @@ -0,0 +1,20 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.daemon.resourcedisk.factory import get_resourcedisk_handler diff --git a/azurelinuxagent/daemon/resourcedisk/default.py b/azurelinuxagent/daemon/resourcedisk/default.py new file mode 100644 index 0000000..d2e400a --- /dev/null +++ b/azurelinuxagent/daemon/resourcedisk/default.py @@ -0,0 +1,219 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import re +import sys +import threading +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.event import add_event, WALAEventOperation +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.exception import ResourceDiskError +from azurelinuxagent.common.osutil import get_osutil + +DATALOSS_WARNING_FILE_NAME="DATALOSS_WARNING_README.txt" +DATA_LOSS_WARNING="""\ +WARNING: THIS IS A TEMPORARY DISK. + +Any data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT. + +Please do not use this disk for storing any personal or application data. 
+ +For additional details to please refer to the MSDN documentation at : http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx +""" + +class ResourceDiskHandler(object): + def __init__(self): + self.osutil = get_osutil() + + def start_activate_resource_disk(self): + disk_thread = threading.Thread(target = self.run) + disk_thread.start() + + def run(self): + mount_point = None + if conf.get_resourcedisk_format(): + mount_point = self.activate_resource_disk() + if mount_point is not None and \ + conf.get_resourcedisk_enable_swap(): + self.enable_swap(mount_point) + + def activate_resource_disk(self): + logger.info("Activate resource disk") + try: + mount_point = conf.get_resourcedisk_mountpoint() + fs = conf.get_resourcedisk_filesystem() + mount_point = self.mount_resource_disk(mount_point, fs) + warning_file = os.path.join(mount_point, DATALOSS_WARNING_FILE_NAME) + try: + fileutil.write_file(warning_file, DATA_LOSS_WARNING) + except IOError as e: + logger.warn("Failed to write data loss warnning:{0}", e) + return mount_point + except ResourceDiskError as e: + logger.error("Failed to mount resource disk {0}", e) + add_event(name="WALA", is_success=False, message=ustr(e), + op=WALAEventOperation.ActivateResourceDisk) + + def enable_swap(self, mount_point): + logger.info("Enable swap") + try: + size_mb = conf.get_resourcedisk_swap_size_mb() + self.create_swap_space(mount_point, size_mb) + except ResourceDiskError as e: + logger.error("Failed to enable swap {0}", e) + + def mount_resource_disk(self, mount_point, fs): + device = self.osutil.device_for_ide_port(1) + if device is None: + raise ResourceDiskError("unable to detect disk topology") + + device = "/dev/" + device + mountlist = shellutil.run_get_output("mount")[1] + existing = self.osutil.get_mount_point(mountlist, device) + + if(existing): + logger.info("Resource disk {0}1 is already mounted", device) + return existing + + fileutil.mkdir(mount_point, mode=0o755) + + logger.info("Detect GPT...") + partition = device + "1" + ret = shellutil.run_get_output("parted {0} print".format(device)) + if ret[0]: + raise ResourceDiskError("({0}) {1}".format(device, ret[1])) + + if "gpt" in ret[1]: + logger.info("GPT detected") + logger.info("Get GPT partitions") + parts = [x for x in ret[1].split("\n") if re.match("^\s*[0-9]+", x)] + logger.info("Found more than {0} GPT partitions.", len(parts)) + if len(parts) > 1: + logger.info("Remove old GPT partitions") + for i in range(1, len(parts) + 1): + logger.info("Remove partition: {0}", i) + shellutil.run("parted {0} rm {1}".format(device, i)) + + logger.info("Create a new GPT partition using entire disk space") + shellutil.run("parted {0} mkpart primary 0% 100%".format(device)) + + logger.info("Format partition: {0} with fstype {1}",partition,fs) + shellutil.run("mkfs." + fs + " " + partition + " -F") + else: + logger.info("GPT not detected") + logger.info("Check fstype") + ret = shellutil.run_get_output("sfdisk -q -c {0} 1".format(device)) + if ret[1].rstrip() == "7" and fs != "ntfs": + logger.info("The partition is formatted with ntfs") + logger.info("Format partition: {0} with fstype {1}",partition,fs) + shellutil.run("sfdisk -c {0} 1 83".format(device)) + shellutil.run("mkfs." + fs + " " + partition + " -F") + + logger.info("Mount resource disk") + ret = shellutil.run("mount {0} {1}".format(partition, mount_point), + chk_err=False) + if ret: + logger.warn("Failed to mount resource disk. Retry mounting") + shellutil.run("mkfs." 
+ fs + " " + partition + " -F") + ret = shellutil.run("mount {0} {1}".format(partition, mount_point)) + if ret: + raise ResourceDiskError("({0}) {1}".format(partition, ret)) + + logger.info("Resource disk ({0}) is mounted at {1} with fstype {2}", + device, mount_point, fs) + return mount_point + + def create_swap_space(self, mount_point, size_mb): + size_kb = size_mb * 1024 + size = size_kb * 1024 + swapfile = os.path.join(mount_point, 'swapfile') + swaplist = shellutil.run_get_output("swapon -s")[1] + + if swapfile in swaplist and os.path.getsize(swapfile) == size: + logger.info("Swap already enabled") + return + + if os.path.isfile(swapfile) and os.path.getsize(swapfile) != size: + logger.info("Remove old swap file") + shellutil.run("swapoff -a", chk_err=False) + os.remove(swapfile) + + if not os.path.isfile(swapfile): + logger.info("Create swap file") + self.mkfile(swapfile, size_kb * 1024) + shellutil.run("mkswap {0}".format(swapfile)) + if shellutil.run("swapon {0}".format(swapfile)): + raise ResourceDiskError("{0}".format(swapfile)) + logger.info("Enabled {0}KB of swap at {1}".format(size_kb, swapfile)) + + def mkfile(self, filename, nbytes): + """ + Create a non-sparse file of that size. Deletes and replaces existing file. + + To allow efficient execution, fallocate will be tried first. This includes + ``os.posix_fallocate`` on Python 3.3+ (unix) and the ``fallocate`` command + in the popular ``util-linux{,-ng}`` package. + + A dd fallback will be tried too. When size < 64M, perform single-pass dd. + Otherwise do two-pass dd. + """ + + if not isinstance(nbytes, int): + nbytes = int(nbytes) + + if nbytes < 0: + raise ValueError(nbytes) + + if os.path.isfile(filename): + os.remove(filename) + + # os.posix_fallocate + if sys.version_info >= (3, 3): + # Probable errors: + # - OSError: Seen on Cygwin, libc notimpl? + # - AttributeError: What if someone runs this under... + with open(filename, 'w') as f: + try: + os.posix_fallocate(f.fileno(), 0, nbytes) + return 0 + except: + # Not confident with this thing, just keep trying... + pass + + # fallocate command + fn_sh = shellutil.quote((filename,)) + ret = shellutil.run(u"fallocate -l {0} {1}".format(nbytes, fn_sh)) + if ret != 127: # 127 = command not found + return ret + + # dd fallback + dd_maxbs = 64 * 1024 ** 2 + dd_cmd = "dd if=/dev/zero bs={0} count={1} conv=notrunc of={2}" + + blocks = int(nbytes / dd_maxbs) + if blocks > 0: + ret = shellutil.run(dd_cmd.format(dd_maxbs, fn_sh, blocks)) << 8 + + remains = int(nbytes % dd_maxbs) + if remains > 0: + ret += shellutil.run(dd_cmd.format(remains, fn_sh, 1)) + + return ret diff --git a/azurelinuxagent/daemon/resourcedisk/factory.py b/azurelinuxagent/daemon/resourcedisk/factory.py new file mode 100644 index 0000000..76e5a23 --- /dev/null +++ b/azurelinuxagent/daemon/resourcedisk/factory.py @@ -0,0 +1,33 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.utils.textutil import Version +from azurelinuxagent.common.version import DISTRO_NAME, \ + DISTRO_VERSION, \ + DISTRO_FULL_NAME +from .default import ResourceDiskHandler +from .freebsd import FreeBSDResourceDiskHandler + +def get_resourcedisk_handler(distro_name=DISTRO_NAME, + distro_version=DISTRO_VERSION, + distro_full_name=DISTRO_FULL_NAME): + if distro_name == "freebsd": + return FreeBSDResourceDiskHandler() + + return ResourceDiskHandler() + diff --git a/azurelinuxagent/daemon/resourcedisk/freebsd.py b/azurelinuxagent/daemon/resourcedisk/freebsd.py new file mode 100644 index 0000000..36a3ac9 --- /dev/null +++ b/azurelinuxagent/daemon/resourcedisk/freebsd.py @@ -0,0 +1,117 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.exception import ResourceDiskError +from azurelinuxagent.daemon.resourcedisk.default import ResourceDiskHandler + +class FreeBSDResourceDiskHandler(ResourceDiskHandler): + """ + This class handles resource disk mounting for FreeBSD. + + The resource disk locates at following slot: + scbus2 on blkvsc1 bus 0: + at scbus2 target 1 lun 0 (da1,pass2) + + There are 2 variations based on partition table type: + 1. MBR: The resource disk partition is /dev/da1s1 + 2. GPT: The resource disk partition is /dev/da1p2, /dev/da1p1 is for reserved usage. + """ + def __init__(self): + super(FreeBSDResourceDiskHandler, self).__init__() + + @staticmethod + def parse_gpart_list(data): + dic = {} + for line in data.split('\n'): + if line.find("Geom name: ") != -1: + geom_name = line[11:] + elif line.find("scheme: ") != -1: + dic[geom_name] = line[8:] + return dic + + def mount_resource_disk(self, mount_point, fs): + if fs != 'ufs': + raise ResourceDiskError("Unsupported filesystem type:{0}, only ufs is supported.".format(fs)) + + # 1. Detect device + err, output = shellutil.run_get_output('gpart list') + if err: + raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output)) + disks = self.parse_gpart_list(output) + + err, output = shellutil.run_get_output('camcontrol periphlist 2:1:0') + if err: + raise ResourceDiskError("Unable to detect resource disk device:{0}".format(output)) + + # 'da1: generation: 4 index: 1 status: MORE\npass2: generation: 4 index: 2 status: LAST\n' + device = None + for line in output.split('\n'): + index = line.find(':') + if index > 0: + geom_name = line[:index] + if geom_name in disks: + device = geom_name + break + + if not device: + raise ResourceDiskError("Unable to detect resource disk device.") + logger.info('Resource disk device {0} found.', device) + + # 2. 
Detect partition + partition_table_type = disks[device] + + if partition_table_type == 'MBR': + provider_name = device + 's1' + elif partition_table_type == 'GPT': + provider_name = device + 'p2' + else: + raise ResourceDiskError("Unsupported partition table type:{0}".format(output)) + + err, output = shellutil.run_get_output('gpart show -p {0}'.format(device)) + if err or output.find(provider_name) == -1: + raise ResourceDiskError("Resource disk partition not found.") + + partition = '/dev/' + provider_name + logger.info('Resource disk partition {0} found.', partition) + + # 3. Mount partition + mount_list = shellutil.run_get_output("mount")[1] + existing = self.osutil.get_mount_point(mount_list, partition) + + if existing: + logger.info("Resource disk {0} is already mounted", partition) + return existing + + fileutil.mkdir(mount_point, mode=0o755) + mount_cmd = 'mount -t {0} {1} {2}'.format(fs, partition, mount_point) + err = shellutil.run(mount_cmd, chk_err=False) + if err: + logger.info('Creating {0} filesystem on partition {1}'.format(fs, partition)) + err, output = shellutil.run_get_output('newfs -U {0}'.format(partition)) + if err: + raise ResourceDiskError("Failed to create new filesystem on partition {0}, error:{1}" + .format(partition, output)) + err, output = shellutil.run_get_output(mount_cmd, chk_err=False) + if err: + raise ResourceDiskError("Failed to mount partition {0}, error {1}".format(partition, output)) + + logger.info("Resource disk partition {0} is mounted at {1} with fstype {2}", partition, mount_point, fs) + return mount_point diff --git a/azurelinuxagent/daemon/scvmm.py b/azurelinuxagent/daemon/scvmm.py new file mode 100644 index 0000000..dc6832a --- /dev/null +++ b/azurelinuxagent/daemon/scvmm.py @@ -0,0 +1,74 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
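The FreeBSD handler above derives the partition name from two pieces of 'gpart list' output: the geom name and its partition scheme. A rough sketch of that parsing and of the MBR/GPT naming rule (function names are illustrative):

def parse_gpart_schemes(output):
    """Map geom name -> partition scheme from 'gpart list' output.

    'Geom name: da1' lines start a section, 'scheme: GPT' lines record
    the partition table type of that geom.
    """
    schemes = {}
    geom = None
    for line in output.splitlines():
        if "Geom name: " in line:
            geom = line.split("Geom name: ", 1)[1].strip()
        elif "scheme: " in line and geom is not None:
            schemes[geom] = line.split("scheme: ", 1)[1].strip()
    return schemes

def resource_disk_partition(device, scheme):
    """Return the partition device for the FreeBSD resource disk."""
    if scheme == "MBR":
        return "/dev/" + device + "s1"   # single MBR slice
    if scheme == "GPT":
        return "/dev/" + device + "p2"   # p1 is reserved
    raise ValueError("unsupported partition table: " + scheme)

# Example: parse_gpart_schemes(gpart_output).get("da1") -> "GPT"
#          resource_disk_partition("da1", "GPT")        -> "/dev/da1p2"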
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import re +import os +import sys +import subprocess +import time +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.osutil import get_osutil + +VMM_CONF_FILE_NAME = "linuxosconfiguration.xml" +VMM_STARTUP_SCRIPT_NAME= "install" + +def get_scvmm_handler(): + return ScvmmHandler() + +class ScvmmHandler(object): + def __init__(self): + self.osutil = get_osutil() + + def detect_scvmm_env(self, dev_dir='/dev'): + logger.info("Detecting Microsoft System Center VMM Environment") + found=False + + # try to load the ATAPI driver, continue on failure + self.osutil.try_load_atapiix_mod() + + # cycle through all available /dev/sr*|hd*|cdrom*|cd* looking for the scvmm configuration file + mount_point = conf.get_dvd_mount_point() + for devices in filter(lambda x: x is not None, [re.match(r'(sr[0-9]|hd[c-z]|cdrom[0-9]?|cd[0-9]+)', dev) for dev in os.listdir(dev_dir)]): + dvd_device = os.path.join(dev_dir, devices.group(0)) + self.osutil.mount_dvd(max_retry=1, chk_err=False, dvd_device=dvd_device, mount_point=mount_point) + found = os.path.isfile(os.path.join(mount_point, VMM_CONF_FILE_NAME)) + if found: + self.start_scvmm_agent(mount_point=mount_point) + break + else: + self.osutil.umount_dvd(chk_err=False, mount_point=mount_point) + + return found + + def start_scvmm_agent(self, mount_point=None): + logger.info("Starting Microsoft System Center VMM Initialization " + "Process") + if mount_point is None: + mount_point = conf.get_dvd_mount_point() + startup_script = os.path.join(mount_point, VMM_STARTUP_SCRIPT_NAME) + devnull = open(os.devnull, 'w') + subprocess.Popen(["/bin/bash", startup_script, "-p " + mount_point], + stdout=devnull, stderr=devnull) + + def run(self): + if self.detect_scvmm_env(): + logger.info("Exiting") + time.sleep(300) + sys.exit(0) diff --git a/azurelinuxagent/distro/__init__.py b/azurelinuxagent/distro/__init__.py index d9b82f5..1ea2f38 100644 --- a/azurelinuxagent/distro/__init__.py +++ b/azurelinuxagent/distro/__init__.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/azurelinuxagent/distro/coreos/__init__.py b/azurelinuxagent/distro/coreos/__init__.py deleted file mode 100644 index 8c1bbdb..0000000 --- a/azurelinuxagent/distro/coreos/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
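The SCVMM probe shown above simply walks /dev for optical devices and mounts each one looking for linuxosconfiguration.xml. A minimal sketch of the device scan only (mounting and launching the DVD's install script are left out):

import os
import re

VMM_CONF_FILE_NAME = "linuxosconfiguration.xml"
# Device names that may hold the SCVMM configuration DVD (full-name match).
DVD_PATTERN = re.compile(r"^(sr[0-9]|hd[c-z]|cdrom[0-9]?|cd[0-9]+)$")

def candidate_dvd_devices(dev_dir="/dev"):
    """Yield optical-device paths to probe for the SCVMM configuration file."""
    for name in os.listdir(dev_dir):
        if DVD_PATTERN.match(name):
            yield os.path.join(dev_dir, name)

# Each candidate is mounted in turn; if <mount_point>/linuxosconfiguration.xml
# exists, the agent runs the DVD's startup script and exits after a delay.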
-# -# Requires Python 2.4+ and Openssl 1.0+ -# diff --git a/azurelinuxagent/distro/coreos/deprovision.py b/azurelinuxagent/distro/coreos/deprovision.py deleted file mode 100644 index 9642579..0000000 --- a/azurelinuxagent/distro/coreos/deprovision.py +++ /dev/null @@ -1,33 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.distro.default.deprovision import DeprovisionHandler, DeprovisionAction - -class CoreOSDeprovisionHandler(DeprovisionHandler): - def __init__(self, distro): - self.distro = distro - - def setup(self, deluser): - warnings, actions = super(CoreOSDeprovisionHandler, self).setup(deluser) - warnings.append("WARNING! /etc/machine-id will be removed.") - files_to_del = ['/etc/machine-id'] - actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) - return warnings, actions - diff --git a/azurelinuxagent/distro/coreos/distro.py b/azurelinuxagent/distro/coreos/distro.py deleted file mode 100644 index 04c7bff..0000000 --- a/azurelinuxagent/distro/coreos/distro.py +++ /dev/null @@ -1,29 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from azurelinuxagent.distro.default.distro import DefaultDistro -from azurelinuxagent.distro.coreos.osutil import CoreOSUtil -from azurelinuxagent.distro.coreos.deprovision import CoreOSDeprovisionHandler - -class CoreOSDistro(DefaultDistro): - def __init__(self): - super(CoreOSDistro, self).__init__() - self.osutil = CoreOSUtil() - self.deprovision_handler = CoreOSDeprovisionHandler(self) - diff --git a/azurelinuxagent/distro/coreos/osutil.py b/azurelinuxagent/distro/coreos/osutil.py deleted file mode 100644 index ffc83e3..0000000 --- a/azurelinuxagent/distro/coreos/osutil.py +++ /dev/null @@ -1,95 +0,0 @@ -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import re -import pwd -import shutil -import socket -import array -import struct -import fcntl -import time -import base64 -import azurelinuxagent.logger as logger -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.textutil as textutil -from azurelinuxagent.distro.default.osutil import DefaultOSUtil - -class CoreOSUtil(DefaultOSUtil): - def __init__(self): - super(CoreOSUtil, self).__init__() - self.agent_conf_file_path = '/usr/share/oem/waagent.conf' - self.waagent_path='/usr/share/oem/bin/waagent' - self.python_path='/usr/share/oem/python/bin' - if 'PATH' in os.environ: - path = "{0}:{1}".format(os.environ['PATH'], self.python_path) - else: - path = self.python_path - os.environ['PATH'] = path - - if 'PYTHONPATH' in os.environ: - py_path = os.environ['PYTHONPATH'] - py_path = "{0}:{1}".format(py_path, self.waagent_path) - else: - py_path = self.waagent_path - os.environ['PYTHONPATH'] = py_path - - def is_sys_user(self, username): - #User 'core' is not a sysuser - if username == 'core': - return False - return super(CoreOSUtil, self).is_sys_user(username) - - def is_dhcp_enabled(self): - return True - - def start_network(self) : - return shellutil.run("systemctl start systemd-networkd", chk_err=False) - - def restart_if(self, iface): - shellutil.run("systemctl restart systemd-networkd") - - def restart_ssh_service(self): - return shellutil.run("systemctl restart sshd", chk_err=False) - - def stop_dhcp_service(self): - return shellutil.run("systemctl stop systemd-networkd", chk_err=False) - - def start_dhcp_service(self): - return shellutil.run("systemctl start systemd-networkd", chk_err=False) - - def start_agent_service(self): - return shellutil.run("systemctl start wagent", chk_err=False) - - def stop_agent_service(self): - return shellutil.run("systemctl stop wagent", chk_err=False) - - def get_dhcp_pid(self): - ret= shellutil.run_get_output("pidof systemd-networkd") - return ret[1] if ret[0] == 0 else None - - def set_ssh_client_alive_interval(self): - #In CoreOS, /etc/sshd_config is mount readonly. Skip the setting - pass - - def conf_sshd(self, disable_password): - #In CoreOS, /etc/sshd_config is mount readonly. Skip the setting - pass - diff --git a/azurelinuxagent/distro/debian/__init__.py b/azurelinuxagent/distro/debian/__init__.py deleted file mode 100644 index d9b82f5..0000000 --- a/azurelinuxagent/distro/debian/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - diff --git a/azurelinuxagent/distro/debian/distro.py b/azurelinuxagent/distro/debian/distro.py deleted file mode 100644 index 01f4e3e..0000000 --- a/azurelinuxagent/distro/debian/distro.py +++ /dev/null @@ -1,27 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from azurelinuxagent.distro.default.distro import DefaultDistro -from azurelinuxagent.distro.debian.osutil import DebianOSUtil - -class DebianDistro(DefaultDistro): - def __init__(self): - super(DebianDistro, self).__init__() - self.osutil = DebianOSUtil() - diff --git a/azurelinuxagent/distro/debian/loader.py b/azurelinuxagent/distro/debian/loader.py deleted file mode 100644 index cc0c06f..0000000 --- a/azurelinuxagent/distro/debian/loader.py +++ /dev/null @@ -1,24 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - - -def get_osutil(): - from azurelinuxagent.distro.debian.osutil import DebianOSUtil - return DebianOSUtil() - diff --git a/azurelinuxagent/distro/debian/osutil.py b/azurelinuxagent/distro/debian/osutil.py deleted file mode 100644 index a40c1de..0000000 --- a/azurelinuxagent/distro/debian/osutil.py +++ /dev/null @@ -1,47 +0,0 @@ -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import re -import pwd -import shutil -import socket -import array -import struct -import fcntl -import time -import base64 -import azurelinuxagent.logger as logger -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.textutil as textutil -from azurelinuxagent.distro.default.osutil import DefaultOSUtil - -class DebianOSUtil(DefaultOSUtil): - def __init__(self): - super(DebianOSUtil, self).__init__() - - def restart_ssh_service(self): - return shellutil.run("service sshd restart", chk_err=False) - - def stop_agent_service(self): - return shellutil.run("service azurelinuxagent stop", chk_err=False) - - def start_agent_service(self): - return shellutil.run("service azurelinuxagent start", chk_err=False) - diff --git a/azurelinuxagent/distro/default/__init__.py b/azurelinuxagent/distro/default/__init__.py deleted file mode 100644 index d9b82f5..0000000 --- a/azurelinuxagent/distro/default/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - diff --git a/azurelinuxagent/distro/default/daemon.py b/azurelinuxagent/distro/default/daemon.py deleted file mode 100644 index cf9eb16..0000000 --- a/azurelinuxagent/distro/default/daemon.py +++ /dev/null @@ -1,103 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import time -import sys -import traceback -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -from azurelinuxagent.future import ustr -from azurelinuxagent.event import add_event, WALAEventOperation -from azurelinuxagent.exception import ProtocolError -from azurelinuxagent.metadata import AGENT_LONG_NAME, AGENT_VERSION, \ - DISTRO_NAME, DISTRO_VERSION, \ - DISTRO_FULL_NAME, PY_VERSION_MAJOR, \ - PY_VERSION_MINOR, PY_VERSION_MICRO -import azurelinuxagent.event as event -import azurelinuxagent.utils.fileutil as fileutil - - -class DaemonHandler(object): - def __init__(self, distro): - self.distro = distro - self.running = True - - - def run(self): - logger.info("{0} Version:{1}", AGENT_LONG_NAME, AGENT_VERSION) - logger.info("OS: {0} {1}", DISTRO_NAME, DISTRO_VERSION) - logger.info("Python: {0}.{1}.{2}", PY_VERSION_MAJOR, PY_VERSION_MINOR, - PY_VERSION_MICRO) - - self.check_pid() - - while self.running: - try: - self.daemon() - except Exception as e: - err_msg = traceback.format_exc() - add_event("WALA", is_success=False, message=ustr(err_msg), - op=WALAEventOperation.UnhandledError) - logger.info("Sleep 15 seconds and restart daemon") - time.sleep(15) - - def check_pid(self): - """Check whether daemon is already running""" - pid = None - pid_file = conf.get_agent_pid_file_path() - if os.path.isfile(pid_file): - pid = fileutil.read_file(pid_file) - - if pid is not None and os.path.isdir(os.path.join("/proc", pid)): - logger.info("Daemon is already running: {0}", pid) - sys.exit(0) - - fileutil.write_file(pid_file, ustr(os.getpid())) - - def daemon(self): - logger.info("Run daemon") - #Create lib dir - if not os.path.isdir(conf.get_lib_dir()): - fileutil.mkdir(conf.get_lib_dir(), mode=0o700) - os.chdir(conf.get_lib_dir()) - - if conf.get_detect_scvmm_env(): - if self.distro.scvmm_handler.run(): - return - - self.distro.provision_handler.run() - - if conf.get_resourcedisk_format(): - self.distro.resource_disk_handler.run() - - try: - protocol = self.distro.protocol_util.detect_protocol() - except ProtocolError as e: - logger.error("Failed to detect protocol, exit", e) - return - - self.distro.event_handler.run() - self.distro.env_handler.run() - - while self.running: - #Handle extensions - self.distro.ext_handlers_handler.run() - time.sleep(25) - diff --git a/azurelinuxagent/distro/default/deprovision.py b/azurelinuxagent/distro/default/deprovision.py deleted file mode 100644 index 4db4cdc..0000000 --- a/azurelinuxagent/distro/default/deprovision.py +++ /dev/null @@ -1,125 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
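The removed daemon's check_pid(), shown above, treats a PID as alive when /proc/<pid> exists; the replacement delegates the same test to osutil.check_pid_alive(), presumably so platform-specific osutil classes can override it. A minimal sketch of the old, Linux-only check:

import os
import sys

def exit_if_daemon_running(pid_file):
    """Exit if the PID recorded in pid_file belongs to a live process.

    A process is considered alive when /proc/<pid> exists (Linux-specific).
    """
    if os.path.isfile(pid_file):
        pid = open(pid_file).read().strip()
        if pid and os.path.isdir(os.path.join("/proc", pid)):
            print("Daemon is already running: %s" % pid)
            sys.exit(0)
    # Not running: record our own PID so later starts detect us.
    with open(pid_file, "w") as f:
        f.write(str(os.getpid()))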
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import azurelinuxagent.conf as conf -from azurelinuxagent.exception import ProtocolError -from azurelinuxagent.future import read_input -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil - -class DeprovisionAction(object): - def __init__(self, func, args=[], kwargs={}): - self.func = func - self.args = args - self.kwargs = kwargs - - def invoke(self): - self.func(*self.args, **self.kwargs) - -class DeprovisionHandler(object): - def __init__(self, distro): - self.distro = distro - - def del_root_password(self, warnings, actions): - warnings.append("WARNING! root password will be disabled. " - "You will not be able to login as root.") - - actions.append(DeprovisionAction(self.distro.osutil.del_root_password)) - - def del_user(self, warnings, actions): - - try: - ovfenv = self.distro.protocol_util.get_ovf_env() - except ProtocolError: - warnings.append("WARNING! ovf-env.xml is not found.") - warnings.append("WARNING! Skip delete user.") - return - - username = ovfenv.username - warnings.append(("WARNING! {0} account and entire home directory " - "will be deleted.").format(username)) - actions.append(DeprovisionAction(self.distro.osutil.del_account, - [username])) - - - def regen_ssh_host_key(self, warnings, actions): - warnings.append("WARNING! All SSH host key pairs will be deleted.") - actions.append(DeprovisionAction(shellutil.run, - ['rm -f /etc/ssh/ssh_host_*key*'])) - - def stop_agent_service(self, warnings, actions): - warnings.append("WARNING! The waagent service will be stopped.") - actions.append(DeprovisionAction(self.distro.osutil.stop_agent_service)) - - def del_files(self, warnings, actions): - files_to_del = ['/root/.bash_history', '/var/log/waagent.log'] - actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) - - def del_dhcp_lease(self, warnings, actions): - warnings.append("WARNING! 
Cached DHCP leases will be deleted.") - dirs_to_del = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"] - actions.append(DeprovisionAction(fileutil.rm_dirs, dirs_to_del)) - - def del_lib_dir(self, warnings, actions): - dirs_to_del = [conf.get_lib_dir()] - actions.append(DeprovisionAction(fileutil.rm_dirs, dirs_to_del)) - - def reset_hostname(self, warnings, actions): - localhost = ["localhost.localdomain"] - actions.append(DeprovisionAction(self.distro.osutil.set_hostname, - localhost)) - actions.append(DeprovisionAction(self.distro.osutil.set_dhcp_hostname, - localhost)) - - def setup(self, deluser): - warnings = [] - actions = [] - - self.stop_agent_service(warnings, actions) - if conf.get_regenerate_ssh_host_key(): - self.regen_ssh_host_key(warnings, actions) - - self.del_dhcp_lease(warnings, actions) - self.reset_hostname(warnings, actions) - - if conf.get_delete_root_password(): - self.del_root_password(warnings, actions) - - self.del_lib_dir(warnings, actions) - self.del_files(warnings, actions) - - if deluser: - self.del_user(warnings, actions) - - return warnings, actions - - def run(self, force=False, deluser=False): - warnings, actions = self.setup(deluser) - for warning in warnings: - print(warning) - - if not force: - confirm = read_input("Do you want to proceed (y/n)") - if not confirm.lower().startswith('y'): - return - - for action in actions: - action.invoke() - - diff --git a/azurelinuxagent/distro/default/dhcp.py b/azurelinuxagent/distro/default/dhcp.py deleted file mode 100644 index fc439d2..0000000 --- a/azurelinuxagent/distro/default/dhcp.py +++ /dev/null @@ -1,318 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -import os -import socket -import array -import time -import threading -import azurelinuxagent.logger as logger -import azurelinuxagent.conf as conf -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -from azurelinuxagent.utils.textutil import hex_dump, hex_dump2, hex_dump3, \ - compare_bytes, str_to_ord, \ - unpack_big_endian, \ - unpack_little_endian, \ - int_to_ip4_addr -from azurelinuxagent.exception import DhcpError - - -class DhcpHandler(object): - """ - Azure use DHCP option 245 to pass endpoint ip to VMs. - """ - def __init__(self, distro): - self.distro = distro - self.endpoint = None - self.gateway = None - self.routes = None - - def run(self): - """ - Send dhcp request - Configure default gateway and routes - Save wire server endpoint if found - """ - self.send_dhcp_req() - self.conf_routes() - - def wait_for_network(self): - """ - Wait for network stack to be initialized. 
- """ - ipv4 = self.distro.osutil.get_ip4_addr() - while ipv4 == '' or ipv4 == '0.0.0.0': - logger.info("Waiting for network.") - time.sleep(10) - logger.info("Try to start network interface.") - self.distro.osutil.start_network() - ipv4 = self.distro.osutil.get_ip4_addr() - - def conf_routes(self): - logger.info("Configure routes") - logger.info("Gateway:{0}", self.gateway) - logger.info("Routes:{0}", self.routes) - #Add default gateway - if self.gateway is not None: - self.distro.osutil.route_add(0 , 0, self.gateway) - if self.routes is not None: - for route in self.routes: - self.distro.osutil.route_add(route[0], route[1], route[2]) - - def _send_dhcp_req(self, request): - __waiting_duration__ = [0, 10, 30, 60, 60] - for duration in __waiting_duration__: - try: - self.distro.osutil.allow_dhcp_broadcast() - response = socket_send(request) - validate_dhcp_resp(request, response) - return response - except DhcpError as e: - logger.warn("Failed to send DHCP request: {0}", e) - time.sleep(duration) - return None - - def send_dhcp_req(self): - """ - Build dhcp request with mac addr - Configure route to allow dhcp traffic - Stop dhcp service if necessary - """ - logger.info("Send dhcp request") - mac_addr = self.distro.osutil.get_mac_addr() - req = build_dhcp_request(mac_addr) - - # Temporary allow broadcast for dhcp. Remove the route when done. - missing_default_route = self.distro.osutil.is_missing_default_route() - ifname = self.distro.osutil.get_if_name() - if missing_default_route: - self.distro.osutil.set_route_for_dhcp_broadcast(ifname) - - # In some distros, dhcp service needs to be shutdown before agent probe - # endpoint through dhcp. - if self.distro.osutil.is_dhcp_enabled(): - self.distro.osutil.stop_dhcp_service() - - resp = self._send_dhcp_req(req) - - if self.distro.osutil.is_dhcp_enabled(): - self.distro.osutil.start_dhcp_service() - - if missing_default_route: - self.distro.osutil.remove_route_for_dhcp_broadcast(ifname) - - if resp is None: - raise DhcpError("Failed to receive dhcp response.") - self.endpoint, self.gateway, self.routes = parse_dhcp_resp(resp) - -def validate_dhcp_resp(request, response): - bytes_recv = len(response) - if bytes_recv < 0xF6: - logger.error("HandleDhcpResponse: Too few bytes received:{0}", - bytes_recv) - return False - - logger.verb("BytesReceived:{0}", hex(bytes_recv)) - logger.verb("DHCP response:{0}", hex_dump(response, bytes_recv)) - - # check transactionId, cookie, MAC address cookie should never mismatch - # transactionId and MAC address may mismatch if we see a response - # meant from another machine - if not compare_bytes(request, response, 0xEC, 4): - logger.verb("Cookie not match:\nsend={0},\nreceive={1}", - hex_dump3(request, 0xEC, 4), - hex_dump3(response, 0xEC, 4)) - raise DhcpError("Cookie in dhcp respones doesn't match the request") - - if not compare_bytes(request, response, 4, 4): - logger.verb("TransactionID not match:\nsend={0},\nreceive={1}", - hex_dump3(request, 4, 4), - hex_dump3(response, 4, 4)) - raise DhcpError("TransactionID in dhcp respones " - "doesn't match the request") - - if not compare_bytes(request, response, 0x1C, 6): - logger.verb("Mac Address not match:\nsend={0},\nreceive={1}", - hex_dump3(request, 0x1C, 6), - hex_dump3(response, 0x1C, 6)) - raise DhcpError("Mac Addr in dhcp respones " - "doesn't match the request") - -def parse_route(response, option, i, length, bytes_recv): - # http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx - logger.verb("Routes at offset: {0} with length:{1}", hex(i), 
hex(length)) - routes = [] - if length < 5: - logger.error("Data too small for option:{0}", option) - j = i + 2 - while j < (i + length + 2): - mask_len_bits = str_to_ord(response[j]) - mask_len_bytes = (((mask_len_bits + 7) & ~7) >> 3) - mask = 0xFFFFFFFF & (0xFFFFFFFF << (32 - mask_len_bits)) - j += 1 - net = unpack_big_endian(response, j, mask_len_bytes) - net <<= (32 - mask_len_bytes * 8) - net &= mask - j += mask_len_bytes - gateway = unpack_big_endian(response, j, 4) - j += 4 - routes.append((net, mask, gateway)) - if j != (i + length + 2): - logger.error("Unable to parse routes") - return routes - -def parse_ip_addr(response, option, i, length, bytes_recv): - if i + 5 < bytes_recv: - if length != 4: - logger.error("Endpoint or Default Gateway not 4 bytes") - return None - addr = unpack_big_endian(response, i + 2, 4) - ip_addr = int_to_ip4_addr(addr) - return ip_addr - else: - logger.error("Data too small for option:{0}", option) - return None - -def parse_dhcp_resp(response): - """ - Parse DHCP response: - Returns endpoint server or None on error. - """ - logger.verb("parse Dhcp Response") - bytes_recv = len(response) - endpoint = None - gateway = None - routes = None - - # Walk all the returned options, parsing out what we need, ignoring the - # others. We need the custom option 245 to find the the endpoint we talk to, - # as well as, to handle some Linux DHCP client incompatibilities, - # options 3 for default gateway and 249 for routes. And 255 is end. - - i = 0xF0 # offset to first option - while i < bytes_recv: - option = str_to_ord(response[i]) - length = 0 - if (i + 1) < bytes_recv: - length = str_to_ord(response[i + 1]) - logger.verb("DHCP option {0} at offset:{1} with length:{2}", - hex(option), hex(i), hex(length)) - if option == 255: - logger.verb("DHCP packet ended at offset:{0}", hex(i)) - break - elif option == 249: - routes = parse_route(response, option, i, length, bytes_recv) - elif option == 3: - gateway = parse_ip_addr(response, option, i, length, bytes_recv) - logger.verb("Default gateway:{0}, at {1}", gateway, hex(i)) - elif option == 245: - endpoint = parse_ip_addr(response, option, i, length, bytes_recv) - logger.verb("Azure wire protocol endpoint:{0}, at {1}", gateway, - hex(i)) - else: - logger.verb("Skipping DHCP option:{0} at {1} with length {2}", - hex(option), hex(i), hex(length)) - i += length + 2 - return endpoint, gateway, routes - -def socket_send(request): - sock = None - try: - sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, - socket.IPPROTO_UDP) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - sock.bind(("0.0.0.0", 68)) - sock.sendto(request, ("", 67)) - sock.settimeout(10) - logger.verb("Send DHCP request: Setting socket.timeout=10, " - "entering recv") - response = sock.recv(1024) - return response - except IOError as e: - raise DhcpError("{0}".format(e)) - finally: - if sock is not None: - sock.close() - -def build_dhcp_request(mac_addr): - """ - Build DHCP request string. 
- """ - # - # typedef struct _DHCP { - # UINT8 Opcode; /* op: BOOTREQUEST or BOOTREPLY */ - # UINT8 HardwareAddressType; /* htype: ethernet */ - # UINT8 HardwareAddressLength; /* hlen: 6 (48 bit mac address) */ - # UINT8 Hops; /* hops: 0 */ - # UINT8 TransactionID[4]; /* xid: random */ - # UINT8 Seconds[2]; /* secs: 0 */ - # UINT8 Flags[2]; /* flags: 0 or 0x8000 for broadcast */ - # UINT8 ClientIpAddress[4]; /* ciaddr: 0 */ - # UINT8 YourIpAddress[4]; /* yiaddr: 0 */ - # UINT8 ServerIpAddress[4]; /* siaddr: 0 */ - # UINT8 RelayAgentIpAddress[4]; /* giaddr: 0 */ - # UINT8 ClientHardwareAddress[16]; /* chaddr: 6 byte eth MAC address */ - # UINT8 ServerName[64]; /* sname: 0 */ - # UINT8 BootFileName[128]; /* file: 0 */ - # UINT8 MagicCookie[4]; /* 99 130 83 99 */ - # /* 0x63 0x82 0x53 0x63 */ - # /* options -- hard code ours */ - # - # UINT8 MessageTypeCode; /* 53 */ - # UINT8 MessageTypeLength; /* 1 */ - # UINT8 MessageType; /* 1 for DISCOVER */ - # UINT8 End; /* 255 */ - # } DHCP; - # - - # tuple of 244 zeros - # (struct.pack_into would be good here, but requires Python 2.5) - request = [0] * 244 - - trans_id = gen_trans_id() - - # Opcode = 1 - # HardwareAddressType = 1 (ethernet/MAC) - # HardwareAddressLength = 6 (ethernet/MAC/48 bits) - for a in range(0, 3): - request[a] = [1, 1, 6][a] - - # fill in transaction id (random number to ensure response matches request) - for a in range(0, 4): - request[4 + a] = str_to_ord(trans_id[a]) - - logger.verb("BuildDhcpRequest: transactionId:%s,%04X" % ( - hex_dump2(trans_id), - unpack_big_endian(request, 4, 4))) - - # fill in ClientHardwareAddress - for a in range(0, 6): - request[0x1C + a] = str_to_ord(mac_addr[a]) - - # DHCP Magic Cookie: 99, 130, 83, 99 - # MessageTypeCode = 53 DHCP Message Type - # MessageTypeLength = 1 - # MessageType = DHCPDISCOVER - # End = 255 DHCP_END - for a in range(0, 8): - request[0xEC + a] = [99, 130, 83, 99, 53, 1, 1, 255][a] - return array.array("B", request) - -def gen_trans_id(): - return os.urandom(4) diff --git a/azurelinuxagent/distro/default/distro.py b/azurelinuxagent/distro/default/distro.py deleted file mode 100644 index ca0d77e..0000000 --- a/azurelinuxagent/distro/default/distro.py +++ /dev/null @@ -1,51 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from azurelinuxagent.conf import ConfigurationProvider -from azurelinuxagent.distro.default.osutil import DefaultOSUtil -from azurelinuxagent.distro.default.daemon import DaemonHandler -from azurelinuxagent.distro.default.init import InitHandler -from azurelinuxagent.distro.default.monitor import MonitorHandler -from azurelinuxagent.distro.default.dhcp import DhcpHandler -from azurelinuxagent.distro.default.protocolUtil import ProtocolUtil -from azurelinuxagent.distro.default.scvmm import ScvmmHandler -from azurelinuxagent.distro.default.env import EnvHandler -from azurelinuxagent.distro.default.provision import ProvisionHandler -from azurelinuxagent.distro.default.resourceDisk import ResourceDiskHandler -from azurelinuxagent.distro.default.extension import ExtHandlersHandler -from azurelinuxagent.distro.default.deprovision import DeprovisionHandler - -class DefaultDistro(object): - """ - """ - def __init__(self): - self.osutil = DefaultOSUtil() - self.protocol_util = ProtocolUtil(self) - - self.init_handler = InitHandler(self) - self.daemon_handler = DaemonHandler(self) - self.event_handler = MonitorHandler(self) - self.dhcp_handler = DhcpHandler(self) - self.scvmm_handler = ScvmmHandler(self) - self.env_handler = EnvHandler(self) - self.provision_handler = ProvisionHandler(self) - self.resource_disk_handler = ResourceDiskHandler(self) - self.ext_handlers_handler = ExtHandlersHandler(self) - self.deprovision_handler = DeprovisionHandler(self) - diff --git a/azurelinuxagent/distro/default/env.py b/azurelinuxagent/distro/default/env.py deleted file mode 100644 index 7878cff..0000000 --- a/azurelinuxagent/distro/default/env.py +++ /dev/null @@ -1,104 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import socket -import threading -import time -import azurelinuxagent.logger as logger -import azurelinuxagent.conf as conf - -class EnvHandler(object): - """ - Monitor changes to dhcp and hostname. - If dhcp clinet process re-start has occurred, reset routes, dhcp with fabric. - - Monitor scsi disk. - If new scsi disk found, set timeout - """ - def __init__(self, distro): - self.distro = distro - self.stopped = True - self.hostname = None - self.dhcpid = None - self.server_thread=None - - def run(self): - if not self.stopped: - logger.info("Stop existing env monitor service.") - self.stop() - - self.stopped = False - logger.info("Start env monitor service.") - self.distro.dhcp_handler.conf_routes() - self.hostname = socket.gethostname() - self.dhcpid = self.distro.osutil.get_dhcp_pid() - self.server_thread = threading.Thread(target = self.monitor) - self.server_thread.setDaemon(True) - self.server_thread.start() - - def monitor(self): - """ - Monitor dhcp client pid and hostname. - If dhcp clinet process re-start has occurred, reset routes. 
- """ - while not self.stopped: - self.distro.osutil.remove_rules_files() - timeout = conf.get_root_device_scsi_timeout() - if timeout is not None: - self.distro.osutil.set_scsi_disks_timeout(timeout) - if conf.get_monitor_hostname(): - self.handle_hostname_update() - self.handle_dhclient_restart() - time.sleep(5) - - def handle_hostname_update(self): - curr_hostname = socket.gethostname() - if curr_hostname != self.hostname: - logger.info("EnvMonitor: Detected host name change: {0} -> {1}", - self.hostname, curr_hostname) - self.distro.osutil.set_hostname(curr_hostname) - self.distro.osutil.publish_hostname(curr_hostname) - self.hostname = curr_hostname - - def handle_dhclient_restart(self): - if self.dhcpid is None: - logger.warn("Dhcp client is not running. ") - self.dhcpid = self.distro.osutil.get_dhcp_pid() - return - - #The dhcp process hasn't changed since last check - if os.path.isdir(os.path.join('/proc', self.dhcpid.strip())): - return - - newpid = self.distro.osutil.get_dhcp_pid() - if newpid is not None and newpid != self.dhcpid: - logger.info("EnvMonitor: Detected dhcp client restart. " - "Restoring routing table.") - self.distro.dhcp_handler.conf_routes() - self.dhcpid = newpid - - def stop(self): - """ - Stop server comminucation and join the thread to main thread. - """ - self.stopped = True - if self.server_thread is not None: - self.server_thread.join() - diff --git a/azurelinuxagent/distro/default/extension.py b/azurelinuxagent/distro/default/extension.py deleted file mode 100644 index 82cdfed..0000000 --- a/azurelinuxagent/distro/default/extension.py +++ /dev/null @@ -1,817 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# -import os -import zipfile -import time -import json -import subprocess -import shutil -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -from azurelinuxagent.event import add_event, WALAEventOperation -from azurelinuxagent.exception import ExtensionError, ProtocolError, HttpError -from azurelinuxagent.future import ustr -from azurelinuxagent.metadata import AGENT_VERSION -from azurelinuxagent.protocol.restapi import ExtHandlerStatus, ExtensionStatus, \ - ExtensionSubStatus, Extension, \ - VMStatus, ExtHandler, \ - get_properties, set_properties -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.restutil as restutil -import azurelinuxagent.utils.shellutil as shellutil -from azurelinuxagent.utils.textutil import Version - -#HandlerEnvironment.json schema version -HANDLER_ENVIRONMENT_VERSION = 1.0 - -VALID_EXTENSION_STATUS = ['transitioning', 'error', 'success', 'warning'] - -VALID_HANDLER_STATUS = ['Ready', 'NotReady', "Installing", "Unresponsive"] - -def validate_has_key(obj, key, fullname): - if key not in obj: - raise ExtensionError("Missing: {0}".format(fullname)) - -def validate_in_range(val, valid_range, name): - if val not in valid_range: - raise ExtensionError("Invalid {0}: {1}".format(name, val)) - -def parse_formatted_message(formatted_message): - if formatted_message is None: - return None - validate_has_key(formatted_message, 'lang', 'formattedMessage/lang') - validate_has_key(formatted_message, 'message', 'formattedMessage/message') - return formatted_message.get('message') - -def parse_ext_substatus(substatus): - #Check extension sub status format - validate_has_key(substatus, 'status', 'substatus/status') - validate_in_range(substatus['status'], VALID_EXTENSION_STATUS, - 'substatus/status') - status = ExtensionSubStatus() - status.name = substatus.get('name') - status.status = substatus.get('status') - status.code = substatus.get('code', 0) - formatted_message = substatus.get('formattedMessage') - status.message = parse_formatted_message(formatted_message) - return status - -def parse_ext_status(ext_status, data): - if data is None or len(data) is None: - return - #Currently, only the first status will be reported - data = data[0] - #Check extension status format - validate_has_key(data, 'status', 'status') - status_data = data['status'] - validate_has_key(status_data, 'status', 'status/status') - - validate_in_range(status_data['status'], VALID_EXTENSION_STATUS, - 'status/status') - - applied_time = status_data.get('configurationAppliedTime') - ext_status.configurationAppliedTime = applied_time - ext_status.operation = status_data.get('operation') - ext_status.status = status_data.get('status') - ext_status.code = status_data.get('code', 0) - formatted_message = status_data.get('formattedMessage') - ext_status.message = parse_formatted_message(formatted_message) - substatus_list = status_data.get('substatus') - if substatus_list is None: - return - for substatus in substatus_list: - ext_status.substatusList.append(parse_ext_substatus(substatus)) - -class ExtHandlerState(object): - NotInstalled = "NotInstalled" - Installed = "Installed" - Enabled = "Enabled" - -class ExtHandlersHandler(object): - def __init__(self, distro): - self.distro = distro - self.ext_handlers = None - self.last_etag = None - self.log_report = False - - def run(self): - ext_handlers, etag = None, None - try: - self.protocol = self.distro.protocol_util.get_protocol() - ext_handlers, etag = 
self.protocol.get_ext_handlers() - except ProtocolError as e: - add_event(name="WALA", is_success=False, message=ustr(e)) - return - - if self.last_etag is not None and self.last_etag == etag: - logger.verb("No change to ext handler config:{0}, skip", etag) - self.log_report = False - else: - logger.info("Handle new ext handler config") - self.log_report = True #Log status report success on new config - self.handle_ext_handlers(ext_handlers) - self.last_etag = etag - - self.report_ext_handlers_status(ext_handlers) - - def handle_ext_handlers(self, ext_handlers): - if ext_handlers.extHandlers is None or \ - len(ext_handlers.extHandlers) == 0: - logger.info("No ext handler config found") - return - - for ext_handler in ext_handlers.extHandlers: - #TODO handle install in sequence, enable in parallel - self.handle_ext_handler(ext_handler) - - def handle_ext_handler(self, ext_handler): - ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) - try: - state = ext_handler.properties.state - ext_handler_i.logger.info("Expected handler state: {0}", state) - if state == "enabled": - self.handle_enable(ext_handler_i) - elif state == u"disabled": - self.handle_disable(ext_handler_i) - elif state == u"uninstall": - self.handle_uninstall(ext_handler_i) - else: - message = u"Unknown ext handler state:{0}".format(state) - raise ExtensionError(message) - except ExtensionError as e: - ext_handler_i.set_handler_status(message=ustr(e), code=-1) - ext_handler_i.report_event(message=ustr(e), is_success=False) - - def handle_enable(self, ext_handler_i): - - ext_handler_i.decide_version() - - old_ext_handler_i = ext_handler_i.get_installed_ext_handler() - if old_ext_handler_i is not None and \ - old_ext_handler_i.version_gt(ext_handler_i): - raise ExtensionError(u"Downgrade not allowed") - - handler_state = ext_handler_i.get_handler_state() - ext_handler_i.logger.info("Current handler state is: {0}", handler_state) - if handler_state == ExtHandlerState.NotInstalled: - ext_handler_i.set_handler_state(ExtHandlerState.NotInstalled) - - ext_handler_i.download() - - ext_handler_i.update_settings() - - if old_ext_handler_i is None: - ext_handler_i.install() - elif ext_handler_i.version_gt(old_ext_handler_i): - old_ext_handler_i.disable() - ext_handler_i.copy_status_files(old_ext_handler_i) - ext_handler_i.update() - old_ext_handler_i.uninstall() - old_ext_handler_i.rm_ext_handler_dir() - ext_handler_i.update_with_install() - else: - ext_handler_i.update_settings() - - ext_handler_i.enable() - - def handle_disable(self, ext_handler_i): - handler_state = ext_handler_i.get_handler_state() - ext_handler_i.logger.info("Current handler state is: {0}", handler_state) - if handler_state == ExtHandlerState.Enabled: - ext_handler_i.disable() - - def handle_uninstall(self, ext_handler_i): - handler_state = ext_handler_i.get_handler_state() - ext_handler_i.logger.info("Current handler state is: {0}", handler_state) - if handler_state != ExtHandlerState.NotInstalled: - if handler_state == ExtHandlerState.Enabled: - ext_handler_i.disable() - ext_handler_i.uninstall() - ext_handler_i.rm_ext_handler_dir() - - def report_ext_handlers_status(self, ext_handlers): - """Go thru handler_state dir, collect and report status""" - vm_status = VMStatus() - vm_status.vmAgent.version = AGENT_VERSION - vm_status.vmAgent.status = "Ready" - vm_status.vmAgent.message = "Guest Agent is running" - - if ext_handlers is not None: - for ext_handler in ext_handlers.extHandlers: - try: - self.report_ext_handler_status(vm_status, ext_handler) - 
except ExtensionError as e: - add_event(name="WALA", is_success=False, message=ustr(e)) - - logger.verb("Report vm agent status") - - try: - self.protocol.report_vm_status(vm_status) - except ProtocolError as e: - message = "Failed to report vm agent status: {0}".format(e) - add_event(name="WALA", is_success=False, message=message) - - if self.log_report: - logger.info("Successfully reported vm agent status") - - - def report_ext_handler_status(self, vm_status, ext_handler): - ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) - - handler_status = ext_handler_i.get_handler_status() - if handler_status is None: - return - - handler_state = ext_handler_i.get_handler_state() - if handler_state != ExtHandlerState.NotInstalled: - try: - active_exts = ext_handler_i.report_ext_status() - handler_status.extensions.extend(active_exts) - except ExtensionError as e: - ext_handler_i.set_handler_status(message=ustr(e), code=-1) - - try: - heartbeat = ext_handler_i.collect_heartbeat() - if heartbeat is not None: - handler_status.status = heartbeat.get('status') - except ExtensionError as e: - ext_handler_i.set_handler_status(message=ustr(e), code=-1) - - vm_status.vmAgent.extensionHandlers.append(handler_status) - -class ExtHandlerInstance(object): - def __init__(self, ext_handler, protocol): - self.ext_handler = ext_handler - self.protocol = protocol - self.operation = None - self.pkg = None - - prefix = "[{0}]".format(self.get_full_name()) - self.logger = logger.Logger(logger.DEFAULT_LOGGER, prefix) - - try: - fileutil.mkdir(self.get_log_dir(), mode=0o744) - except IOError as e: - self.logger.error(u"Failed to create extension log dir: {0}", e) - - log_file = os.path.join(self.get_log_dir(), "CommandExecution.log") - self.logger.add_appender(logger.AppenderType.FILE, - logger.LogLevel.INFO, log_file) - - def decide_version(self): - """ - If auto-upgrade, get the largest public extension version under - the requested major version family of currently installed plugin version - - Else, get the highest hot-fix for requested version, - """ - self.logger.info("Decide which version to use") - try: - pkg_list = self.protocol.get_ext_handler_pkgs(self.ext_handler) - except ProtocolError as e: - raise ExtensionError("Failed to get ext handler pkgs", e) - - version = self.ext_handler.properties.version - update_policy = self.ext_handler.properties.upgradePolicy - - version_frag = version.split('.') - if len(version_frag) < 2: - raise ExtensionError("Wrong version format: {0}".format(version)) - - version_prefix = None - if update_policy is not None and update_policy == 'auto': - version_prefix = "{0}.".format(version_frag[0]) - else: - version_prefix = "{0}.{1}.".format(version_frag[0], version_frag[1]) - - packages = [x for x in pkg_list.versions \ - if x.version.startswith(version_prefix) or \ - x.version == version] - - packages = sorted(packages, key=lambda x: Version(x.version), - reverse=True) - - if len(packages) <= 0: - raise ExtensionError("Failed to find and valid extension package") - self.pkg = packages[0] - self.ext_handler.properties.version = packages[0].version - self.logger.info("Use version: {0}", self.pkg.version) - - def version_gt(self, other): - self_version = self.ext_handler.properties.version - other_version = other.ext_handler.properties.version - return Version(self_version) > Version(other_version) - - def get_installed_ext_handler(self): - lastest_version = None - ext_handler_name = self.ext_handler.name - - for dir_name in os.listdir(conf.get_lib_dir()): - path = 
os.path.join(conf.get_lib_dir(), dir_name) - if os.path.isdir(path) and dir_name.startswith(ext_handler_name): - seperator = dir_name.rfind('-') - if seperator < 0: - continue - installed_name = dir_name[0: seperator] - installed_version = dir_name[seperator + 1:] - if installed_name != ext_handler_name: - continue - if lastest_version is None or \ - Version(lastest_version) < Version(installed_version): - lastest_version = installed_version - - if lastest_version is None: - return None - - data = get_properties(self.ext_handler) - old_ext_handler = ExtHandler() - set_properties("ExtHandler", old_ext_handler, data) - old_ext_handler.properties.version = lastest_version - return ExtHandlerInstance(old_ext_handler, self.protocol) - - def copy_status_files(self, old_ext_handler_i): - self.logger.info("Copy status files from old plugin to new") - old_ext_dir = old_ext_handler_i.get_base_dir() - new_ext_dir = self.get_base_dir() - - old_ext_mrseq_file = os.path.join(old_ext_dir, "mrseq") - if os.path.isfile(old_ext_mrseq_file): - shutil.copy2(old_ext_mrseq_file, new_ext_dir) - - old_ext_status_dir = old_ext_handler_i.get_status_dir() - new_ext_status_dir = self.get_status_dir() - - if os.path.isdir(old_ext_status_dir): - for status_file in os.listdir(old_ext_status_dir): - status_file = os.path.join(old_ext_status_dir, status_file) - if os.path.isfile(status_file): - shutil.copy2(status_file, new_ext_status_dir) - - def set_operation(self, op): - self.operation = op - - def report_event(self, message="", is_success=True): - version = self.ext_handler.properties.version - add_event(name=self.ext_handler.name, version=version, message=message, - op=self.operation, is_success=is_success) - - def download(self): - self.logger.info("Download extension package") - self.set_operation(WALAEventOperation.Download) - if self.pkg is None: - raise ExtensionError("No package uri found") - - package = None - for uri in self.pkg.uris: - try: - package = self.protocol.download_ext_handler_pkg(uri.uri) - except ProtocolError as e: - logger.warn("Failed download extension: {0}", e) - - if package is None: - raise ExtensionError("Failed to download extension") - - self.logger.info("Unpack extension package") - pkg_file = os.path.join(conf.get_lib_dir(), - os.path.basename(uri.uri) + ".zip") - try: - fileutil.write_file(pkg_file, bytearray(package), asbin=True) - zipfile.ZipFile(pkg_file).extractall(self.get_base_dir()) - except IOError as e: - raise ExtensionError(u"Failed to write and unzip plugin", e) - - chmod = "find {0} -type f | xargs chmod u+x".format(self.get_base_dir()) - shellutil.run(chmod) - self.report_event(message="Download succeeded") - - self.logger.info("Initialize extension directory") - #Save HandlerManifest.json - man_file = fileutil.search_file(self.get_base_dir(), - 'HandlerManifest.json') - - if man_file is None: - raise ExtensionError("HandlerManifest.json not found") - - try: - man = fileutil.read_file(man_file, remove_bom=True) - fileutil.write_file(self.get_manifest_file(), man) - except IOError as e: - raise ExtensionError(u"Failed to save HandlerManifest.json", e) - - #Create status and config dir - try: - status_dir = self.get_status_dir() - fileutil.mkdir(status_dir, mode=0o700) - conf_dir = self.get_conf_dir() - fileutil.mkdir(conf_dir, mode=0o700) - except IOError as e: - raise ExtensionError(u"Failed to create status or config dir", e) - - #Save HandlerEnvironment.json - self.create_handler_env() - - def enable(self): - self.logger.info("Enable extension.") - 
self.set_operation(WALAEventOperation.Enable) - - man = self.load_manifest() - self.launch_command(man.get_enable_command()) - self.set_handler_state(ExtHandlerState.Enabled) - self.set_handler_status(status="Ready", message="Plugin enabled") - - def disable(self): - self.logger.info("Disable extension.") - self.set_operation(WALAEventOperation.Disable) - - man = self.load_manifest() - self.launch_command(man.get_disable_command(), timeout=900) - self.set_handler_state(ExtHandlerState.Installed) - self.set_handler_status(status="NotReady", message="Plugin disabled") - - def install(self): - self.logger.info("Install extension.") - self.set_operation(WALAEventOperation.Install) - - man = self.load_manifest() - self.launch_command(man.get_install_command(), timeout=900) - self.set_handler_state(ExtHandlerState.Installed) - - def uninstall(self): - self.logger.info("Uninstall extension.") - self.set_operation(WALAEventOperation.UnInstall) - - try: - man = self.load_manifest() - self.launch_command(man.get_uninstall_command()) - except ExtensionError as e: - self.report_event(message=ustr(e), is_success=False) - - def rm_ext_handler_dir(self): - try: - handler_state_dir = self.get_handler_state_dir() - if os.path.isdir(handler_state_dir): - self.logger.info("Remove ext handler dir: {0}", handler_state_dir) - shutil.rmtree(handler_state_dir) - base_dir = self.get_base_dir() - if os.path.isdir(base_dir): - self.logger.info("Remove ext handler dir: {0}", base_dir) - shutil.rmtree(base_dir) - except IOError as e: - message = "Failed to rm ext handler dir: {0}".format(e) - self.report_event(message=message, is_success=False) - - def update(self): - self.logger.info("Update extension.") - self.set_operation(WALAEventOperation.Update) - - man = self.load_manifest() - self.launch_command(man.get_update_command(), timeout=900) - - def update_with_install(self): - man = self.load_manifest() - if man.is_update_with_install(): - self.install() - else: - self.logger.info("UpdateWithInstall not set. 
" - "Skip install during upgrade.") - self.set_handler_state(ExtHandlerState.Installed) - - def get_largest_seq_no(self): - seq_no = -1 - conf_dir = self.get_conf_dir() - for item in os.listdir(conf_dir): - item_path = os.path.join(conf_dir, item) - if os.path.isfile(item_path): - try: - seperator = item.rfind(".") - if seperator > 0 and item[seperator + 1:] == 'settings': - curr_seq_no = int(item.split('.')[0]) - if curr_seq_no > seq_no: - seq_no = curr_seq_no - except Exception as e: - self.logger.verb("Failed to parse file name: {0}", item) - continue - return seq_no - - def collect_ext_status(self, ext): - self.logger.verb("Collect extension status") - - seq_no = self.get_largest_seq_no() - if seq_no == -1: - return None - - status_dir = self.get_status_dir() - ext_status_file = "{0}.status".format(seq_no) - ext_status_file = os.path.join(status_dir, ext_status_file) - - ext_status = ExtensionStatus(seq_no=seq_no) - try: - data_str = fileutil.read_file(ext_status_file) - data = json.loads(data_str) - parse_ext_status(ext_status, data) - except IOError as e: - ext_status.message = u"Failed to get status file {0}".format(e) - ext_status.code = -1 - ext_status.status = "error" - except ValueError as e: - ext_status.message = u"Malformed status file {0}".format(e) - ext_status.code = -1 - ext_status.status = "error" - - return ext_status - - def report_ext_status(self): - active_exts = [] - for ext in self.ext_handler.properties.extensions: - ext_status = self.collect_ext_status(ext) - if ext_status is None: - continue - try: - self.protocol.report_ext_status(self.ext_handler.name, ext.name, - ext_status) - active_exts.append(ext.name) - except ProtocolError as e: - self.logger.error(u"Failed to report extension status: {0}", e) - return active_exts - - def collect_heartbeat(self): - man = self.load_manifest() - if not man.is_report_heartbeat(): - return - heartbeat_file = os.path.join(conf.get_lib_dir(), - self.get_heartbeat_file()) - - self.logger.info("Collect heart beat") - if not os.path.isfile(heartbeat_file): - raise ExtensionError("Failed to get heart beat file") - if not self.is_responsive(heartbeat_file): - return { - "status": "Unresponsive", - "code": -1, - "message": "Extension heartbeat is not responsive" - } - try: - heartbeat_json = fileutil.read_file(heartbeat_file) - heartbeat = json.loads(heartbeat_json)[0]['heartbeat'] - except IOError as e: - raise ExtensionError("Failed to get heartbeat file:{0}".format(e)) - except ValueError as e: - raise ExtensionError("Malformed heartbeat file: {0}".format(e)) - return heartbeat - - def is_responsive(self, heartbeat_file): - last_update=int(time.time() - os.stat(heartbeat_file).st_mtime) - return last_update > 600 # not updated for more than 10 min - - def launch_command(self, cmd, timeout=300): - self.logger.info("Launch command:{0}", cmd) - base_dir = self.get_base_dir() - try: - devnull = open(os.devnull, 'w') - child = subprocess.Popen(base_dir + "/" + cmd, shell=True, - cwd=base_dir, stdout=devnull) - except Exception as e: - #TODO do not catch all exception - raise ExtensionError("Failed to launch: {0}, {1}".format(cmd, e)) - - retry = timeout / 5 - while retry > 0 and child.poll == None: - time.sleep(5) - retry -= 1 - if retry == 0: - os.kill(child.pid, 9) - raise ExtensionError("Timeout({0}): {1}".format(timeout, cmd)) - - ret = child.wait() - if ret == None or ret != 0: - raise ExtensionError("Non-zero exit code: {0}, {1}".format(ret, cmd)) - - self.report_event(message="Launch command succeeded: {0}".format(cmd)) - - def 
load_manifest(self): - man_file = self.get_manifest_file() - try: - data = json.loads(fileutil.read_file(man_file)) - except IOError as e: - raise ExtensionError('Failed to load manifest file.') - except ValueError as e: - raise ExtensionError('Malformed manifest file.') - - return HandlerManifest(data[0]) - - def update_settings_file(self, settings_file, settings): - settings_file = os.path.join(self.get_conf_dir(), settings_file) - try: - fileutil.write_file(settings_file, settings) - except IOError as e: - raise ExtensionError(u"Failed to update settings file", e) - - def update_settings(self): - if self.ext_handler.properties.extensions is None or \ - len(self.ext_handler.properties.extensions) == 0: - #This is the behavior of waagent 2.0.x - #The new agent has to be consistent with the old one. - self.logger.info("Extension has no settings, write empty 0.settings") - self.update_settings_file("0.settings", "") - return - - for ext in self.ext_handler.properties.extensions: - settings = { - 'publicSettings': ext.publicSettings, - 'protectedSettings': ext.protectedSettings, - 'protectedSettingsCertThumbprint': ext.certificateThumbprint - } - ext_settings = { - "runtimeSettings":[{ - "handlerSettings": settings - }] - } - settings_file = "{0}.settings".format(ext.sequenceNumber) - self.logger.info("Update settings file: {0}", settings_file) - self.update_settings_file(settings_file, json.dumps(ext_settings)) - - def create_handler_env(self): - env = [{ - "name": self.ext_handler.name, - "version" : HANDLER_ENVIRONMENT_VERSION, - "handlerEnvironment" : { - "logFolder" : self.get_log_dir(), - "configFolder" : self.get_conf_dir(), - "statusFolder" : self.get_status_dir(), - "heartbeatFile" : self.get_heartbeat_file() - } - }] - try: - fileutil.write_file(self.get_env_file(), json.dumps(env)) - except IOError as e: - raise ExtensionError(u"Failed to save handler environment", e) - - def get_handler_state_dir(self): - return os.path.join(conf.get_lib_dir(), "handler_state", - self.get_full_name()) - - def set_handler_state(self, handler_state): - state_dir = self.get_handler_state_dir() - if not os.path.exists(state_dir): - try: - fileutil.mkdir(state_dir, 0o700) - except IOError as e: - self.logger.error("Failed to create state dir: {0}", e) - - try: - state_file = os.path.join(state_dir, "state") - fileutil.write_file(state_file, handler_state) - except IOError as e: - self.logger.error("Failed to set state: {0}", e) - - def get_handler_state(self): - state_dir = self.get_handler_state_dir() - state_file = os.path.join(state_dir, "state") - if not os.path.isfile(state_file): - return ExtHandlerState.NotInstalled - - try: - return fileutil.read_file(state_file) - except IOError as e: - self.logger.error("Failed to get state: {0}", e) - return ExtHandlerState.NotInstalled - - def set_handler_status(self, status="NotReady", message="", - code=0): - state_dir = self.get_handler_state_dir() - if not os.path.exists(state_dir): - try: - fileutil.mkdir(state_dir, 0o700) - except IOError as e: - self.logger.error("Failed to create state dir: {0}", e) - - handler_status = ExtHandlerStatus() - handler_status.name = self.ext_handler.name - handler_status.version = self.ext_handler.properties.version - handler_status.message = message - handler_status.code = code - handler_status.status = status - status_file = os.path.join(state_dir, "status") - - try: - fileutil.write_file(status_file, - json.dumps(get_properties(handler_status))) - except (IOError, ValueError, ProtocolError) as e: - 
self.logger.error("Failed to save handler status: {0}", e) - - def get_handler_status(self): - state_dir = self.get_handler_state_dir() - status_file = os.path.join(state_dir, "status") - if not os.path.isfile(status_file): - return None - - try: - data = json.loads(fileutil.read_file(status_file)) - handler_status = ExtHandlerStatus() - set_properties("ExtHandlerStatus", handler_status, data) - return handler_status - except (IOError, ValueError) as e: - self.logger.error("Failed to get handler status: {0}", e) - - def get_full_name(self): - return "{0}-{1}".format(self.ext_handler.name, - self.ext_handler.properties.version) - - def get_base_dir(self): - return os.path.join(conf.get_lib_dir(), self.get_full_name()) - - def get_status_dir(self): - return os.path.join(self.get_base_dir(), "status") - - def get_conf_dir(self): - return os.path.join(self.get_base_dir(), 'config') - - def get_heartbeat_file(self): - return os.path.join(self.get_base_dir(), 'heartbeat.log') - - def get_manifest_file(self): - return os.path.join(self.get_base_dir(), 'HandlerManifest.json') - - def get_env_file(self): - return os.path.join(self.get_base_dir(), 'HandlerEnvironment.json') - - def get_log_dir(self): - return os.path.join(conf.get_ext_log_dir(), self.ext_handler.name, - self.ext_handler.properties.version) - -class HandlerEnvironment(object): - def __init__(self, data): - self.data = data - - def get_version(self): - return self.data["version"] - - def get_log_dir(self): - return self.data["handlerEnvironment"]["logFolder"] - - def get_conf_dir(self): - return self.data["handlerEnvironment"]["configFolder"] - - def get_status_dir(self): - return self.data["handlerEnvironment"]["statusFolder"] - - def get_heartbeat_file(self): - return self.data["handlerEnvironment"]["heartbeatFile"] - -class HandlerManifest(object): - def __init__(self, data): - if data is None or data['handlerManifest'] is None: - raise ExtensionError('Malformed manifest file.') - self.data = data - - def get_name(self): - return self.data["name"] - - def get_version(self): - return self.data["version"] - - def get_install_command(self): - return self.data['handlerManifest']["installCommand"] - - def get_uninstall_command(self): - return self.data['handlerManifest']["uninstallCommand"] - - def get_update_command(self): - return self.data['handlerManifest']["updateCommand"] - - def get_enable_command(self): - return self.data['handlerManifest']["enableCommand"] - - def get_disable_command(self): - return self.data['handlerManifest']["disableCommand"] - - def is_reboot_after_install(self): - """ - Deprecated - """ - return False - - def is_report_heartbeat(self): - return self.data['handlerManifest'].get('reportHeartbeat', False) - - def is_update_with_install(self): - update_mode = self.data['handlerManifest'].get('updateMode') - if update_mode is None: - return True - return update_mode.low() == "updatewithinstall" diff --git a/azurelinuxagent/distro/default/init.py b/azurelinuxagent/distro/default/init.py deleted file mode 100644 index c703e87..0000000 --- a/azurelinuxagent/distro/default/init.py +++ /dev/null @@ -1,53 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -import azurelinuxagent.event as event - - -class InitHandler(object): - def __init__(self, distro): - self.distro = distro - - def run(self, verbose): - #Init stdout log - level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO - logger.add_logger_appender(logger.AppenderType.STDOUT, level) - - #Init config - conf_file_path = self.distro.osutil.get_agent_conf_file_path() - conf.load_conf_from_file(conf_file_path) - - #Init log - verbose = verbose or conf.get_logs_verbose() - level = logger.LogLevel.VERBOSE if verbose else logger.LogLevel.INFO - logger.add_logger_appender(logger.AppenderType.FILE, level, - path="/var/log/waagent.log") - logger.add_logger_appender(logger.AppenderType.CONSOLE, level, - path="/dev/console") - - #Init event reporter - event_dir = os.path.join(conf.get_lib_dir(), "events") - event.init_event_logger(event_dir) - event.enable_unhandled_err_dump("WALA") - - - diff --git a/azurelinuxagent/distro/default/monitor.py b/azurelinuxagent/distro/default/monitor.py deleted file mode 100644 index 3b26c9a..0000000 --- a/azurelinuxagent/distro/default/monitor.py +++ /dev/null @@ -1,182 +0,0 @@ -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import sys -import traceback -import atexit -import json -import time -import datetime -import threading -import platform -import azurelinuxagent.logger as logger -import azurelinuxagent.conf as conf -from azurelinuxagent.event import WALAEventOperation, add_event -from azurelinuxagent.exception import EventError, ProtocolError, OSUtilError -from azurelinuxagent.future import ustr -from azurelinuxagent.utils.textutil import parse_doc, findall, find, getattrib -from azurelinuxagent.protocol.restapi import TelemetryEventParam, \ - TelemetryEventList, \ - TelemetryEvent, \ - set_properties, get_properties -from azurelinuxagent.metadata import DISTRO_NAME, DISTRO_VERSION, \ - DISTRO_CODE_NAME, AGENT_LONG_VERSION - - -def parse_event(data_str): - try: - return parse_json_event(data_str) - except ValueError: - return parse_xml_event(data_str) - -def parse_xml_param(param_node): - name = getattrib(param_node, "Name") - value_str = getattrib(param_node, "Value") - attr_type = getattrib(param_node, "T") - value = value_str - if attr_type == 'mt:uint64': - value = int(value_str) - elif attr_type == 'mt:bool': - value = bool(value_str) - elif attr_type == 'mt:float64': - value = float(value_str) - return TelemetryEventParam(name, value) - -def parse_xml_event(data_str): - try: - xml_doc = parse_doc(data_str) - event_id = getattrib(find(xml_doc, "Event"), 'id') - provider_id = getattrib(find(xml_doc, "Provider"), 'id') - event = TelemetryEvent(event_id, provider_id) - param_nodes = findall(xml_doc, 'Param') - for param_node in param_nodes: - event.parameters.append(parse_xml_param(param_node)) - return event - except Exception as e: - raise ValueError(ustr(e)) - -def parse_json_event(data_str): - data = json.loads(data_str) - event = TelemetryEvent() - set_properties("TelemetryEvent", event, data) - return event - - -class MonitorHandler(object): - def __init__(self, distro): - self.distro = distro - self.sysinfo = [] - - def run(self): - event_thread = threading.Thread(target = self.daemon) - event_thread.setDaemon(True) - event_thread.start() - - def init_sysinfo(self): - osversion = "{0}:{1}-{2}-{3}:{4}".format(platform.system(), - DISTRO_NAME, - DISTRO_VERSION, - DISTRO_CODE_NAME, - platform.release()) - - - self.sysinfo.append(TelemetryEventParam("OSVersion", osversion)) - self.sysinfo.append(TelemetryEventParam("GAVersion", AGENT_LONG_VERSION)) - - try: - ram = self.distro.osutil.get_total_mem() - processors = self.distro.osutil.get_processor_cores() - self.sysinfo.append(TelemetryEventParam("RAM", ram)) - self.sysinfo.append(TelemetryEventParam("Processors", processors)) - except OSUtilError as e: - logger.warn("Failed to get system info: {0}", e) - - try: - protocol = self.distro.protocol_util.get_protocol() - vminfo = protocol.get_vminfo() - self.sysinfo.append(TelemetryEventParam("VMName", - vminfo.vmName)) - self.sysinfo.append(TelemetryEventParam("TenantName", - vminfo.tenantName)) - self.sysinfo.append(TelemetryEventParam("RoleName", - vminfo.roleName)) - self.sysinfo.append(TelemetryEventParam("RoleInstanceName", - vminfo.roleInstanceName)) - self.sysinfo.append(TelemetryEventParam("ContainerId", - vminfo.containerId)) - except ProtocolError as e: - logger.warn("Failed to get system info: {0}", e) - - def collect_event(self, evt_file_name): - try: - logger.verb("Found event file: {0}", evt_file_name) - with open(evt_file_name, "rb") as evt_file: - #if fail to open or delete the file, throw exception - data_str = 
evt_file.read().decode("utf-8",'ignore') - logger.verb("Processed event file: {0}", evt_file_name) - os.remove(evt_file_name) - return data_str - except IOError as e: - msg = "Failed to process {0}, {1}".format(evt_file_name, e) - raise EventError(msg) - - def collect_and_send_events(self): - event_list = TelemetryEventList() - event_dir = os.path.join(conf.get_lib_dir(), "events") - event_files = os.listdir(event_dir) - for event_file in event_files: - if not event_file.endswith(".tld"): - continue - event_file_path = os.path.join(event_dir, event_file) - try: - data_str = self.collect_event(event_file_path) - except EventError as e: - logger.error("{0}", e) - continue - - try: - event = parse_event(data_str) - event.parameters.extend(self.sysinfo) - event_list.events.append(event) - except (ValueError, ProtocolError) as e: - logger.warn("Failed to decode event file: {0}", e) - continue - - if len(event_list.events) == 0: - return - - try: - protocol = self.distro.protocol_util.get_protocol() - protocol.report_event(event_list) - except ProtocolError as e: - logger.error("{0}", e) - - def daemon(self): - self.init_sysinfo() - last_heartbeat = datetime.datetime.min - period = datetime.timedelta(hours = 12) - while(True): - if (datetime.datetime.now()-last_heartbeat) > period: - last_heartbeat = datetime.datetime.now() - add_event(op=WALAEventOperation.HeartBeat, name="WALA", - is_success=True) - try: - self.collect_and_send_events() - except Exception as e: - logger.warn("Failed to send events: {0}", e) - time.sleep(60) diff --git a/azurelinuxagent/distro/default/osutil.py b/azurelinuxagent/distro/default/osutil.py deleted file mode 100644 index 18ab2ba..0000000 --- a/azurelinuxagent/distro/default/osutil.py +++ /dev/null @@ -1,623 +0,0 @@ -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import re -import shutil -import socket -import array -import struct -import time -import pwd -import fcntl -import base64 -import azurelinuxagent.logger as logger -import azurelinuxagent.conf as conf -from azurelinuxagent.exception import OSUtilError -from azurelinuxagent.future import ustr -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.textutil as textutil -from azurelinuxagent.utils.cryptutil import CryptUtil - -__RULES_FILES__ = [ "/lib/udev/rules.d/75-persistent-net-generator.rules", - "/etc/udev/rules.d/70-persistent-net.rules" ] - -""" -Define distro specific behavior. OSUtil class defines default behavior -for all distros. Each concrete distro classes could overwrite default behavior -if needed. 
-""" - -class DefaultOSUtil(object): - - def __init__(self): - self.agent_conf_file_path = '/etc/waagent.conf' - self.selinux=None - - def get_agent_conf_file_path(self): - return self.agent_conf_file_path - - def get_userentry(self, username): - try: - return pwd.getpwnam(username) - except KeyError: - return None - - def is_sys_user(self, username): - """ - Check whether use is a system user. - If reset sys user is allowed in conf, return False - Otherwise, check whether UID is less than UID_MIN - """ - if conf.get_allow_reset_sys_user(): - return False - - userentry = self.get_userentry(username) - uidmin = None - try: - uidmin_def = fileutil.get_line_startingwith("UID_MIN", - "/etc/login.defs") - if uidmin_def is not None: - uidmin = int(uidmin_def.split()[1]) - except IOError as e: - pass - if uidmin == None: - uidmin = 100 - if userentry != None and userentry[2] < uidmin: - return True - else: - return False - - def useradd(self, username, expiration=None): - """ - Create user account with 'username' - """ - userentry = self.get_userentry(username) - if userentry is not None: - logger.info("User {0} already exists, skip useradd", username) - return - - if expiration is not None: - cmd = "useradd -m {0} -e {1}".format(username, expiration) - else: - cmd = "useradd -m {0}".format(username) - retcode, out = shellutil.run_get_output(cmd) - if retcode != 0: - raise OSUtilError(("Failed to create user account:{0}, " - "retcode:{1}, " - "output:{2}").format(username, retcode, out)) - - def chpasswd(self, username, password, crypt_id=6, salt_len=10): - if self.is_sys_user(username): - raise OSUtilError(("User {0} is a system user. " - "Will not set passwd.").format(username)) - passwd_hash = textutil.gen_password_hash(password, crypt_id, salt_len) - cmd = "usermod -p '{0}' {1}".format(passwd_hash, username) - ret, output = shellutil.run_get_output(cmd, log_cmd=False) - if ret != 0: - raise OSUtilError(("Failed to set password for {0}: {1}" - "").format(username, output)) - - def conf_sudoer(self, username, nopasswd): - # for older distros create sudoers.d - if not os.path.isdir('/etc/sudoers.d/'): - # create the /etc/sudoers.d/ directory - os.mkdir('/etc/sudoers.d/') - # add the include of sudoers.d to the /etc/sudoers - sudoers = '\n' + '#includedir /etc/sudoers.d/\n' - fileutil.append_file('/etc/sudoers', sudoers) - sudoer = None - if nopasswd: - sudoer = "{0} ALL = (ALL) NOPASSWD\n".format(username) - else: - sudoer = "{0} ALL = (ALL) ALL\n".format(username) - fileutil.append_file('/etc/sudoers.d/waagent', sudoer) - fileutil.chmod('/etc/sudoers.d/waagent', 0o440) - - def del_root_password(self): - try: - passwd_file_path = conf.get_passwd_file_path() - passwd_content = fileutil.read_file(passwd_file_path) - passwd = passwd_content.split('\n') - new_passwd = [x for x in passwd if not x.startswith("root:")] - new_passwd.insert(0, "root:*LOCK*:14600::::::") - fileutil.write_file(passwd_file_path, "\n".join(new_passwd)) - except IOError as e: - raise OSUtilError("Failed to delete root password:{0}".format(e)) - - def _norm_path(self, filepath): - home = conf.get_home_dir() - # Expand HOME variable if present in path - path = os.path.normpath(filepath.replace("$HOME", home)) - return path - - def deploy_ssh_keypair(self, username, keypair): - """ - Deploy id_rsa and id_rsa.pub - """ - path, thumbprint = keypair - path = self._norm_path(path) - dir_path = os.path.dirname(path) - fileutil.mkdir(dir_path, mode=0o700, owner=username) - lib_dir = conf.get_lib_dir() - prv_path = 
os.path.join(lib_dir, thumbprint + '.prv') - if not os.path.isfile(prv_path): - raise OSUtilError("Can't find {0}.prv".format(thumbprint)) - shutil.copyfile(prv_path, path) - pub_path = path + '.pub' - crytputil = CryptUtil(conf.get_openssl_cmd()) - pub = crytputil.get_pubkey_from_prv(prv_path) - fileutil.write_file(pub_path, pub) - self.set_selinux_context(pub_path, 'unconfined_u:object_r:ssh_home_t:s0') - self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0') - os.chmod(path, 0o644) - os.chmod(pub_path, 0o600) - - def openssl_to_openssh(self, input_file, output_file): - cryptutil = CryptUtil(conf.get_openssl_cmd()) - cryptutil.crt_to_ssh(input_file, output_file) - - def deploy_ssh_pubkey(self, username, pubkey): - """ - Deploy authorized_key - """ - path, thumbprint, value = pubkey - if path is None: - raise OSUtilError("Publich key path is None") - - crytputil = CryptUtil(conf.get_openssl_cmd()) - - path = self._norm_path(path) - dir_path = os.path.dirname(path) - fileutil.mkdir(dir_path, mode=0o700, owner=username) - if value is not None: - if not value.startswith("ssh-"): - raise OSUtilError("Bad public key: {0}".format(value)) - fileutil.write_file(path, value) - elif thumbprint is not None: - lib_dir = conf.get_lib_dir() - crt_path = os.path.join(lib_dir, thumbprint + '.crt') - if not os.path.isfile(crt_path): - raise OSUtilError("Can't find {0}.crt".format(thumbprint)) - pub_path = os.path.join(lib_dir, thumbprint + '.pub') - pub = crytputil.get_pubkey_from_crt(crt_path) - fileutil.write_file(pub_path, pub) - self.set_selinux_context(pub_path, - 'unconfined_u:object_r:ssh_home_t:s0') - self.openssl_to_openssh(pub_path, path) - fileutil.chmod(pub_path, 0o600) - else: - raise OSUtilError("SSH public key Fingerprint and Value are None") - - self.set_selinux_context(path, 'unconfined_u:object_r:ssh_home_t:s0') - fileutil.chowner(path, username) - fileutil.chmod(path, 0o644) - - def is_selinux_system(self): - """ - Checks and sets self.selinux = True if SELinux is available on system. - """ - if self.selinux == None: - if shellutil.run("which getenforce", chk_err=False) == 0: - self.selinux = True - else: - self.selinux = False - return self.selinux - - def is_selinux_enforcing(self): - """ - Calls shell command 'getenforce' and returns True if 'Enforcing'. - """ - if self.is_selinux_system(): - output = shellutil.run_get_output("getenforce")[1] - return output.startswith("Enforcing") - else: - return False - - def set_selinux_enforce(self, state): - """ - Calls shell command 'setenforce' with 'state' - and returns resulting exit code. - """ - if self.is_selinux_system(): - if state: s = '1' - else: s='0' - return shellutil.run("setenforce "+s) - - def set_selinux_context(self, path, con): - """ - Calls shell 'chcon' with 'path' and 'con' context. - Returns exit result. 
- """ - if self.is_selinux_system(): - return shellutil.run('chcon ' + con + ' ' + path) - - def set_ssh_client_alive_interval(self): - conf_file_path = conf.get_sshd_conf_file_path() - conf_file = fileutil.read_file(conf_file_path).split("\n") - textutil.set_ssh_config(conf_file, "ClientAliveInterval", "180") - fileutil.write_file(conf_file_path, '\n'.join(conf_file)) - logger.info("Configured SSH client probing to keep connections alive.") - - def conf_sshd(self, disable_password): - option = "no" if disable_password else "yes" - conf_file_path = conf.get_sshd_conf_file_path() - conf_file = fileutil.read_file(conf_file_path).split("\n") - textutil.set_ssh_config(conf_file, "PasswordAuthentication", option) - textutil.set_ssh_config(conf_file, "ChallengeResponseAuthentication", - option) - fileutil.write_file(conf_file_path, "\n".join(conf_file)) - logger.info("Disabled SSH password-based authentication methods.") - - - def get_dvd_device(self, dev_dir='/dev'): - patten=r'(sr[0-9]|hd[c-z]|cdrom[0-9])' - for dvd in [re.match(patten, dev) for dev in os.listdir(dev_dir)]: - if dvd is not None: - return "/dev/{0}".format(dvd.group(0)) - raise OSUtilError("Failed to get dvd device") - - def mount_dvd(self, max_retry=6, chk_err=True): - dvd = self.get_dvd_device() - mount_point = conf.get_dvd_mount_point() - mountlist = shellutil.run_get_output("mount")[1] - existing = self.get_mount_point(mountlist, dvd) - if existing is not None: #Already mounted - logger.info("{0} is already mounted at {1}", dvd, existing) - return - if not os.path.isdir(mount_point): - os.makedirs(mount_point) - - for retry in range(0, max_retry): - retcode = self.mount(dvd, mount_point, option="-o ro -t iso9660,udf", - chk_err=chk_err) - if retcode == 0: - logger.info("Successfully mounted dvd") - return - if retry < max_retry - 1: - logger.warn("Mount dvd failed: retry={0}, ret={1}", retry, - retcode) - time.sleep(5) - if chk_err: - raise OSUtilError("Failed to mount dvd.") - - def umount_dvd(self, chk_err=True): - mount_point = conf.get_dvd_mount_point() - retcode = self.umount(mount_point, chk_err=chk_err) - if chk_err and retcode != 0: - raise OSUtilError("Failed to umount dvd.") - - def eject_dvd(self, chk_err=True): - dvd = self.get_dvd_device() - retcode = shellutil.run("eject {0}".format(dvd)) - if chk_err and retcode != 0: - raise OSUtilError("Failed to eject dvd: ret={0}".format(retcode)) - - def load_atappix_mod(self): - if self.is_atapiix_mod_loaded(): - return - ret, kern_version = shellutil.run_get_output("uname -r") - if ret != 0: - raise Exception("Failed to call uname -r") - mod_path = os.path.join('/lib/modules', - kern_version.strip('\n'), - 'kernel/drivers/ata/ata_piix.ko') - if not os.path.isfile(mod_path): - raise Exception("Can't find module file:{0}".format(mod_path)) - - ret, output = shellutil.run_get_output("insmod " + mod_path) - if ret != 0: - raise Exception("Error calling insmod for ATAPI CD-ROM driver") - if not self.is_atapiix_mod_loaded(max_retry=3): - raise Exception("Failed to load ATAPI CD-ROM driver") - - def is_atapiix_mod_loaded(self, max_retry=1): - for retry in range(0, max_retry): - ret = shellutil.run("lsmod | grep ata_piix", chk_err=False) - if ret == 0: - logger.info("Module driver for ATAPI CD-ROM is already present.") - return True - if retry < max_retry - 1: - time.sleep(1) - return False - - def mount(self, dvd, mount_point, option="", chk_err=True): - cmd = "mount {0} {1} {2}".format(dvd, option, mount_point) - return shellutil.run_get_output(cmd, chk_err)[0] - - def 
umount(self, mount_point, chk_err=True): - return shellutil.run("umount {0}".format(mount_point), chk_err=chk_err) - - def allow_dhcp_broadcast(self): - #Open DHCP port if iptables is enabled. - # We supress error logging on error. - shellutil.run("iptables -D INPUT -p udp --dport 68 -j ACCEPT", - chk_err=False) - shellutil.run("iptables -I INPUT -p udp --dport 68 -j ACCEPT", - chk_err=False) - - - def remove_rules_files(self, rules_files=__RULES_FILES__): - lib_dir = conf.get_lib_dir() - for src in rules_files: - file_name = fileutil.base_name(src) - dest = os.path.join(lib_dir, file_name) - if os.path.isfile(dest): - os.remove(dest) - if os.path.isfile(src): - logger.warn("Move rules file {0} to {1}", file_name, dest) - shutil.move(src, dest) - - def restore_rules_files(self, rules_files=__RULES_FILES__): - lib_dir = conf.get_lib_dir() - for dest in rules_files: - filename = fileutil.base_name(dest) - src = os.path.join(lib_dir, filename) - if os.path.isfile(dest): - continue - if os.path.isfile(src): - logger.warn("Move rules file {0} to {1}", filename, dest) - shutil.move(src, dest) - - def get_mac_addr(self): - """ - Convienience function, returns mac addr bound to - first non-loobback interface. - """ - ifname='' - while len(ifname) < 2 : - ifname=self.get_first_if()[0] - addr = self.get_if_mac(ifname) - return textutil.hexstr_to_bytearray(addr) - - def get_if_mac(self, ifname): - """ - Return the mac-address bound to the socket. - """ - sock = socket.socket(socket.AF_INET, - socket.SOCK_DGRAM, - socket.IPPROTO_UDP) - param = struct.pack('256s', (ifname[:15]+('\0'*241)).encode('latin-1')) - info = fcntl.ioctl(sock.fileno(), 0x8927, param) - return ''.join(['%02X' % textutil.str_to_ord(char) for char in info[18:24]]) - - def get_first_if(self): - """ - Return the interface name, and ip addr of the - first active non-loopback interface. - """ - iface='' - expected=16 # how many devices should I expect... 
- struct_size=40 # for 64bit the size is 40 bytes - sock = socket.socket(socket.AF_INET, - socket.SOCK_DGRAM, - socket.IPPROTO_UDP) - buff=array.array('B', b'\0' * (expected * struct_size)) - param = struct.pack('iL', - expected*struct_size, - buff.buffer_info()[0]) - ret = fcntl.ioctl(sock.fileno(), 0x8912, param) - retsize=(struct.unpack('iL', ret)[0]) - if retsize == (expected * struct_size): - logger.warn(('SIOCGIFCONF returned more than {0} up ' - 'network interfaces.'), expected) - sock = buff.tostring() - for i in range(0, struct_size * expected, struct_size): - iface=sock[i:i+16].split(b'\0', 1)[0] - if iface == b'lo': - continue - else: - break - return iface.decode('latin-1'), socket.inet_ntoa(sock[i+20:i+24]) - - def is_missing_default_route(self): - routes = shellutil.run_get_output("route -n")[1] - for route in routes.split("\n"): - if route.startswith("0.0.0.0 ") or route.startswith("default "): - return False - return True - - def get_if_name(self): - return self.get_first_if()[0] - - def get_ip4_addr(self): - return self.get_first_if()[1] - - def set_route_for_dhcp_broadcast(self, ifname): - return shellutil.run("route add 255.255.255.255 dev {0}".format(ifname), - chk_err=False) - - def remove_route_for_dhcp_broadcast(self, ifname): - shellutil.run("route del 255.255.255.255 dev {0}".format(ifname), - chk_err=False) - - def is_dhcp_enabled(self): - return False - - def stop_dhcp_service(self): - pass - - def start_dhcp_service(self): - pass - - def start_network(self): - pass - - def start_agent_service(self): - pass - - def stop_agent_service(self): - pass - - def register_agent_service(self): - pass - - def unregister_agent_service(self): - pass - - def restart_ssh_service(self): - pass - - def route_add(self, net, mask, gateway): - """ - Add specified route using /sbin/route add -net. 
- """ - cmd = ("/sbin/route add -net " - "{0} netmask {1} gw {2}").format(net, mask, gateway) - return shellutil.run(cmd, chk_err=False) - - def get_dhcp_pid(self): - ret= shellutil.run_get_output("pidof dhclient") - return ret[1] if ret[0] == 0 else None - - def set_hostname(self, hostname): - fileutil.write_file('/etc/hostname', hostname) - shellutil.run("hostname {0}".format(hostname), chk_err=False) - - def set_dhcp_hostname(self, hostname): - autosend = r'^[^#]*?send\s*host-name.*?(|gethostname[(,)])' - dhclient_files = ['/etc/dhcp/dhclient.conf', '/etc/dhcp3/dhclient.conf'] - for conf_file in dhclient_files: - if not os.path.isfile(conf_file): - continue - if fileutil.findstr_in_file(conf_file, autosend): - #Return if auto send host-name is configured - return - fileutil.update_conf_file(conf_file, - 'send host-name', - 'send host-name {0}'.format(hostname)) - - def restart_if(self, ifname): - shellutil.run("ifdown {0} && ifup {1}".format(ifname, ifname)) - - def publish_hostname(self, hostname): - self.set_dhcp_hostname(hostname) - ifname = self.get_if_name() - self.restart_if(ifname) - - def set_scsi_disks_timeout(self, timeout): - for dev in os.listdir("/sys/block"): - if dev.startswith('sd'): - self.set_block_device_timeout(dev, timeout) - - def set_block_device_timeout(self, dev, timeout): - if dev is not None and timeout is not None: - file_path = "/sys/block/{0}/device/timeout".format(dev) - content = fileutil.read_file(file_path) - original = content.splitlines()[0].rstrip() - if original != timeout: - fileutil.write_file(file_path, timeout) - logger.info("Set block dev timeout: {0} with timeout: {1}", - dev, timeout) - - def get_mount_point(self, mountlist, device): - """ - Example of mountlist: - /dev/sda1 on / type ext4 (rw) - proc on /proc type proc (rw) - sysfs on /sys type sysfs (rw) - devpts on /dev/pts type devpts (rw,gid=5,mode=620) - tmpfs on /dev/shm type tmpfs - (rw,rootcontext="system_u:object_r:tmpfs_t:s0") - none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw) - /dev/sdb1 on /mnt/resource type ext4 (rw) - """ - if (mountlist and device): - for entry in mountlist.split('\n'): - if(re.search(device, entry)): - tokens = entry.split() - #Return the 3rd column of this line - return tokens[2] if len(tokens) > 2 else None - return None - - def device_for_ide_port(self, port_id): - """ - Return device name attached to ide port 'n'. - """ - if port_id > 3: - return None - g0 = "00000000" - if port_id > 1: - g0 = "00000001" - port_id = port_id - 2 - device = None - path = "/sys/bus/vmbus/devices/" - for vmbus in os.listdir(path): - deviceid = fileutil.read_file(os.path.join(path, vmbus, "device_id")) - guid = deviceid.lstrip('{').split('-') - if guid[0] == g0 and guid[1] == "000" + ustr(port_id): - for root, dirs, files in os.walk(path + vmbus): - if root.endswith("/block"): - device = dirs[0] - break - else : #older distros - for d in dirs: - if ':' in d and "block" == d.split(':')[0]: - device = d.split(':')[1] - break - break - return device - - def del_account(self, username): - if self.is_sys_user(username): - logger.error("{0} is a system user. 
Will not delete it.", username) - shellutil.run("> /var/run/utmp") - shellutil.run("userdel -f -r " + username) - #Remove user from suders - if os.path.isfile("/etc/suders.d/waagent"): - try: - content = fileutil.read_file("/etc/sudoers.d/waagent") - sudoers = content.split("\n") - sudoers = [x for x in sudoers if username not in x] - fileutil.write_file("/etc/sudoers.d/waagent", - "\n".join(sudoers)) - except IOError as e: - raise OSUtilError("Failed to remove sudoer: {0}".format(e)) - - def decode_customdata(self, data): - return base64.b64decode(data) - - def get_total_mem(self): - cmd = "grep MemTotal /proc/meminfo |awk '{print $2}'" - ret = shellutil.run_get_output(cmd) - if ret[0] == 0: - return int(ret[1])/1024 - else: - raise OSUtilError("Failed to get total memory: {0}".format(ret[1])) - - def get_processor_cores(self): - ret = shellutil.run_get_output("grep 'processor.*:' /proc/cpuinfo |wc -l") - if ret[0] == 0: - return int(ret[1]) - else: - raise OSUtilError("Failed to get procerssor cores") - - def set_admin_access_to_ip(self, dest_ip): - #This allows root to access dest_ip - rm_old= "iptables -D OUTPUT -d {0} -j ACCEPT -m owner --uid-owner 0" - rule = "iptables -A OUTPUT -d {0} -j ACCEPT -m owner --uid-owner 0" - shellutil.run(rm_old.format(dest_ip), chk_err=False) - shellutil.run(rule.format(dest_ip)) - - #This blocks all other users to access dest_ip - rm_old = "iptables -D OUTPUT -d {0} -j DROP" - rule = "iptables -A OUTPUT -d {0} -j DROP" - shellutil.run(rm_old.format(dest_ip), chk_err=False) - shellutil.run(rule.format(dest_ip)) - diff --git a/azurelinuxagent/distro/default/protocolUtil.py b/azurelinuxagent/distro/default/protocolUtil.py deleted file mode 100644 index 34466cf..0000000 --- a/azurelinuxagent/distro/default/protocolUtil.py +++ /dev/null @@ -1,243 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# -import os -import re -import shutil -import time -import threading -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -from azurelinuxagent.exception import ProtocolError, OSUtilError, \ - ProtocolNotFoundError, DhcpError -from azurelinuxagent.future import ustr -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.protocol.ovfenv import OvfEnv -from azurelinuxagent.protocol.wire import WireProtocol -from azurelinuxagent.protocol.metadata import MetadataProtocol, METADATA_ENDPOINT -import azurelinuxagent.utils.shellutil as shellutil - -OVF_FILE_NAME = "ovf-env.xml" - -#Tag file to indicate usage of metadata protocol -TAG_FILE_NAME = "useMetadataEndpoint.tag" - -PROTOCOL_FILE_NAME = "Protocol" - -#MAX retry times for protocol probing -MAX_RETRY = 360 - -PROBE_INTERVAL = 10 - -ENDPOINT_FILE_NAME = "WireServerEndpoint" - -class ProtocolUtil(object): - """ - ProtocolUtil handles initialization for protocol instance. 2 protocol types - are invoked, wire protocol and metadata protocols. 
- """ - def __init__(self, distro): - self.distro = distro - self.protocol = None - self.lock = threading.Lock() - - def copy_ovf_env(self): - """ - Copy ovf env file from dvd to hard disk. - Remove password before save it to the disk - """ - dvd_mount_point = conf.get_dvd_mount_point() - ovf_file_path_on_dvd = os.path.join(dvd_mount_point, OVF_FILE_NAME) - tag_file_path_on_dvd = os.path.join(dvd_mount_point, TAG_FILE_NAME) - try: - self.distro.osutil.mount_dvd() - ovfxml = fileutil.read_file(ovf_file_path_on_dvd, remove_bom=True) - ovfenv = OvfEnv(ovfxml) - ovfxml = re.sub(".*?<", "*<", ovfxml) - ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) - fileutil.write_file(ovf_file_path, ovfxml) - - if os.path.isfile(tag_file_path_on_dvd): - logger.info("Found {0} in provisioning ISO", TAG_FILE_NAME) - tag_file_path = os.path.join(conf.get_lib_dir(), TAG_FILE_NAME) - shutil.copyfile(tag_file_path_on_dvd, tag_file_path) - - except (OSUtilError, IOError) as e: - raise ProtocolError(ustr(e)) - - try: - self.distro.osutil.umount_dvd() - self.distro.osutil.eject_dvd() - except OSUtilError as e: - logger.warn(ustr(e)) - - return ovfenv - - def get_ovf_env(self): - """ - Load saved ovf-env.xml - """ - ovf_file_path = os.path.join(conf.get_lib_dir(), OVF_FILE_NAME) - if os.path.isfile(ovf_file_path): - xml_text = fileutil.read_file(ovf_file_path) - return OvfEnv(xml_text) - else: - raise ProtocolError("ovf-env.xml is missing.") - - def _get_wireserver_endpoint(self): - try: - file_path = os.path.join(conf.get_lib_dir(), ENDPOINT_FILE_NAME) - return fileutil.read_file(file_path) - except IOError as e: - raise OSUtilError(ustr(e)) - - def _set_wireserver_endpoint(self, endpoint): - try: - file_path = os.path.join(conf.get_lib_dir(), ENDPOINT_FILE_NAME) - fileutil.write_file(file_path, endpoint) - except IOError as e: - raise OSUtilError(ustr(e)) - - def _detect_wire_protocol(self): - endpoint = self.distro.dhcp_handler.endpoint - if endpoint is None: - logger.info("WireServer endpoint is not found. Rerun dhcp handler") - try: - self.distro.dhcp_handler.run() - except DhcpError as e: - raise ProtocolError(ustr(e)) - endpoint = self.distro.dhcp_handler.endpoint - - try: - protocol = WireProtocol(endpoint) - protocol.detect() - self._set_wireserver_endpoint(endpoint) - return protocol - except ProtocolError as e: - logger.info("WireServer is not responding. Reset endpoint") - self.distro.dhcp_handler.endpoint = None - raise e - - def _detect_metadata_protocol(self): - protocol = MetadataProtocol() - protocol.detect() - - #Only allow root access METADATA_ENDPOINT - self.distro.osutil.set_admin_access_to_ip(METADATA_ENDPOINT) - - return protocol - - def _detect_protocol(self, protocols): - """ - Probe protocol endpoints in turn. - """ - protocol_file_path = os.path.join(conf.get_lib_dir(), PROTOCOL_FILE_NAME) - if os.path.isfile(protocol_file_path): - os.remove(protocol_file_path) - for retry in range(0, MAX_RETRY): - for protocol in protocols: - try: - if protocol == "WireProtocol": - return self._detect_wire_protocol() - - if protocol == "MetadataProtocol": - return self._detect_metadata_protocol() - - except ProtocolError as e: - logger.info("Protocol endpoint not found: {0}, {1}", - protocol, e) - - if retry < MAX_RETRY -1: - logger.info("Retry detect protocols: retry={0}", retry) - time.sleep(PROBE_INTERVAL) - raise ProtocolNotFoundError("No protocol found.") - - def _get_protocol(self): - """ - Get protocol instance based on previous detecting result. 
- """ - protocol_file_path = os.path.join(conf.get_lib_dir(), - PROTOCOL_FILE_NAME) - if not os.path.isfile(protocol_file_path): - raise ProtocolError("No protocl found") - - protocol_name = fileutil.read_file(protocol_file_path) - if protocol_name == "WireProtocol": - endpoint = self._get_wireserver_endpoint() - return WireProtocol(endpoint) - elif protocol_name == "MetadataProtocol": - return MetadataProtocol() - else: - raise ProtocolNotFoundError(("Unknown protocol: {0}" - "").format(protocol_name)) - - def detect_protocol(self): - """ - Detect protocol by endpoints - - :returns: protocol instance - """ - logger.info("Detect protocol endpoints") - protocols = ["WireProtocol", "MetadataProtocol"] - self.lock.acquire() - try: - if self.protocol is None: - self.protocol = self._detect_protocol(protocols) - return self.protocol - finally: - self.lock.release() - - def detect_protocol_by_file(self): - """ - Detect protocol by tag file. - - If a file "useMetadataEndpoint.tag" is found on provision iso, - metedata protocol will be used. No need to probe for wire protocol - - :returns: protocol instance - """ - logger.info("Detect protocol by file") - self.lock.acquire() - try: - tag_file_path = os.path.join(conf.get_lib_dir(), TAG_FILE_NAME) - if self.protocol is None: - protocols = [] - if os.path.isfile(tag_file_path): - protocols.append("MetadataProtocol") - else: - protocols.append("WireProtocol") - self.protocol = self._detect_protocol(protocols) - finally: - self.lock.release() - return self.protocol - - def get_protocol(self): - """ - Get protocol instance based on previous detecting result. - - :returns protocol instance - """ - self.lock.acquire() - try: - if self.protocol is None: - self.protocol = self._get_protocol() - return self.protocol - finally: - self.lock.release() - return self.protocol - diff --git a/azurelinuxagent/distro/default/provision.py b/azurelinuxagent/distro/default/provision.py deleted file mode 100644 index 695b82a..0000000 --- a/azurelinuxagent/distro/default/provision.py +++ /dev/null @@ -1,191 +0,0 @@ -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -""" -Provision handler -""" - -import os -import azurelinuxagent.logger as logger -from azurelinuxagent.future import ustr -import azurelinuxagent.conf as conf -from azurelinuxagent.event import add_event, WALAEventOperation -from azurelinuxagent.exception import ProvisionError, ProtocolError, OSUtilError -from azurelinuxagent.protocol.restapi import ProvisionStatus -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.fileutil as fileutil - -CUSTOM_DATA_FILE="CustomData" - -class ProvisionHandler(object): - - def __init__(self, distro): - self.distro = distro - - def run(self): - #If provision is not enabled, return - if not conf.get_provision_enabled(): - logger.info("Provisioning is disabled. 
Skip.") - return - - provisioned = os.path.join(conf.get_lib_dir(), "provisioned") - if os.path.isfile(provisioned): - return - - logger.info("Run provision handler.") - logger.info("Copy ovf-env.xml.") - try: - ovfenv = self.distro.protocol_util.copy_ovf_env() - except ProtocolError as e: - self.report_event("Failed to copy ovf-env.xml: {0}".format(e)) - return - - self.distro.protocol_util.detect_protocol_by_file() - - self.report_not_ready("Provisioning", "Starting") - - try: - logger.info("Start provisioning") - self.provision(ovfenv) - fileutil.write_file(provisioned, "") - thumbprint = self.reg_ssh_host_key() - logger.info("Finished provisioning") - except ProvisionError as e: - logger.error("Provision failed: {0}", e) - self.report_not_ready("ProvisioningFailed", ustr(e)) - self.report_event(ustr(e)) - return - - self.report_ready(thumbprint) - self.report_event("Provision succeed", is_success=True) - - def reg_ssh_host_key(self): - keypair_type = conf.get_ssh_host_keypair_type() - if conf.get_regenerate_ssh_host_key(): - shellutil.run("rm -f /etc/ssh/ssh_host_*key*") - shellutil.run(("ssh-keygen -N '' -t {0} -f /etc/ssh/ssh_host_{1}_key" - "").format(keypair_type, keypair_type)) - thumbprint = self.get_ssh_host_key_thumbprint(keypair_type) - return thumbprint - - def get_ssh_host_key_thumbprint(self, keypair_type): - cmd = "ssh-keygen -lf /etc/ssh/ssh_host_{0}_key.pub".format(keypair_type) - ret = shellutil.run_get_output(cmd) - if ret[0] == 0: - return ret[1].rstrip().split()[1].replace(':', '') - else: - raise ProvisionError(("Failed to generate ssh host key: " - "ret={0}, out= {1}").format(ret[0], ret[1])) - - def provision(self, ovfenv): - logger.info("Handle ovf-env.xml.") - try: - logger.info("Set host name.") - self.distro.osutil.set_hostname(ovfenv.hostname) - - logger.info("Publish host name.") - self.distro.osutil.publish_hostname(ovfenv.hostname) - - self.config_user_account(ovfenv) - - self.save_customdata(ovfenv) - - if conf.get_delete_root_password(): - self.distro.osutil.del_root_password() - - except OSUtilError as e: - raise ProvisionError("Failed to handle ovf-env.xml: {0}".format(e)) - - def config_user_account(self, ovfenv): - logger.info("Create user account if not exists") - self.distro.osutil.useradd(ovfenv.username) - - if ovfenv.user_password is not None: - logger.info("Set user password.") - crypt_id = conf.get_password_cryptid() - salt_len = conf.get_password_crypt_salt_len() - self.distro.osutil.chpasswd(ovfenv.username, ovfenv.user_password, - crypt_id=crypt_id, salt_len=salt_len) - - logger.info("Configure sudoer") - self.distro.osutil.conf_sudoer(ovfenv.username, ovfenv.user_password is None) - - logger.info("Configure sshd") - self.distro.osutil.conf_sshd(ovfenv.disable_ssh_password_auth) - - #Disable selinux temporary - sel = self.distro.osutil.is_selinux_enforcing() - if sel: - self.distro.osutil.set_selinux_enforce(0) - - self.deploy_ssh_pubkeys(ovfenv) - self.deploy_ssh_keypairs(ovfenv) - - if sel: - self.distro.osutil.set_selinux_enforce(1) - - self.distro.osutil.restart_ssh_service() - - def save_customdata(self, ovfenv): - customdata = ovfenv.customdata - if customdata is None: - return - - logger.info("Save custom data") - lib_dir = conf.get_lib_dir() - if conf.get_decode_customdata(): - customdata= self.distro.osutil.decode_customdata(customdata) - customdata_file = os.path.join(lib_dir, CUSTOM_DATA_FILE) - fileutil.write_file(customdata_file, customdata) - - if conf.get_execute_customdata(): - logger.info("Execute custom data") - 
os.chmod(customdata_file, 0o700) - shellutil.run(customdata_file) - - def deploy_ssh_pubkeys(self, ovfenv): - for pubkey in ovfenv.ssh_pubkeys: - logger.info("Deploy ssh public key.") - self.distro.osutil.deploy_ssh_pubkey(ovfenv.username, pubkey) - - def deploy_ssh_keypairs(self, ovfenv): - for keypair in ovfenv.ssh_keypairs: - logger.info("Deploy ssh key pairs.") - self.distro.osutil.deploy_ssh_keypair(ovfenv.username, keypair) - - def report_event(self, message, is_success=False): - add_event(name="WALA", message=message, is_success=is_success, - op=WALAEventOperation.Provision) - - def report_not_ready(self, sub_status, description): - status = ProvisionStatus(status="NotReady", subStatus=sub_status, - description=description) - try: - protocol = self.distro.protocol_util.get_protocol() - protocol.report_provision_status(status) - except ProtocolError as e: - self.report_event(ustr(e)) - - def report_ready(self, thumbprint=None): - status = ProvisionStatus(status="Ready") - status.properties.certificateThumbprint = thumbprint - try: - protocol = self.distro.protocol_util.get_protocol() - protocol.report_provision_status(status) - except ProtocolError as e: - self.report_event(ustr(e)) - diff --git a/azurelinuxagent/distro/default/resourceDisk.py b/azurelinuxagent/distro/default/resourceDisk.py deleted file mode 100644 index a6c5232..0000000 --- a/azurelinuxagent/distro/default/resourceDisk.py +++ /dev/null @@ -1,167 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import re -import threading -import azurelinuxagent.logger as logger -from azurelinuxagent.future import ustr -import azurelinuxagent.conf as conf -from azurelinuxagent.event import add_event, WALAEventOperation -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -from azurelinuxagent.exception import ResourceDiskError - -DATALOSS_WARNING_FILE_NAME="DATALOSS_WARNING_README.txt" -DATA_LOSS_WARNING="""\ -WARNING: THIS IS A TEMPORARY DISK. - -Any data stored on this drive is SUBJECT TO LOSS and THERE IS NO WAY TO RECOVER IT. - -Please do not use this disk for storing any personal or application data. 
- -For additional details to please refer to the MSDN documentation at : http://msdn.microsoft.com/en-us/library/windowsazure/jj672979.aspx -""" - -class ResourceDiskHandler(object): - def __init__(self, distro): - self.distro = distro - - def start_activate_resource_disk(self): - disk_thread = threading.Thread(target = self.run) - disk_thread.start() - - def run(self): - mount_point = None - if conf.get_resourcedisk_format(): - mount_point = self.activate_resource_disk() - if mount_point is not None and \ - conf.get_resourcedisk_enable_swap(): - self.enable_swap(mount_point) - - def activate_resource_disk(self): - logger.info("Activate resource disk") - try: - mount_point = conf.get_resourcedisk_mountpoint() - fs = conf.get_resourcedisk_filesystem() - mount_point = self.mount_resource_disk(mount_point, fs) - warning_file = os.path.join(mount_point, DATALOSS_WARNING_FILE_NAME) - try: - fileutil.write_file(warning_file, DATA_LOSS_WARNING) - except IOError as e: - logger.warn("Failed to write data loss warnning:{0}", e) - return mount_point - except ResourceDiskError as e: - logger.error("Failed to mount resource disk {0}", e) - add_event(name="WALA", is_success=False, message=ustr(e), - op=WALAEventOperation.ActivateResourceDisk) - - def enable_swap(self, mount_point): - logger.info("Enable swap") - try: - size_mb = conf.get_resourcedisk_swap_size_mb() - self.create_swap_space(mount_point, size_mb) - except ResourceDiskError as e: - logger.error("Failed to enable swap {0}", e) - - def mount_resource_disk(self, mount_point, fs): - device = self.distro.osutil.device_for_ide_port(1) - if device is None: - raise ResourceDiskError("unable to detect disk topology") - - device = "/dev/" + device - mountlist = shellutil.run_get_output("mount")[1] - existing = self.distro.osutil.get_mount_point(mountlist, device) - - if(existing): - logger.info("Resource disk {0}1 is already mounted", device) - return existing - - fileutil.mkdir(mount_point, mode=0o755) - - logger.info("Detect GPT...") - partition = device + "1" - ret = shellutil.run_get_output("parted {0} print".format(device)) - if ret[0]: - raise ResourceDiskError("({0}) {1}".format(device, ret[1])) - - if "gpt" in ret[1]: - logger.info("GPT detected") - logger.info("Get GPT partitions") - parts = [x for x in ret[1].split("\n") if re.match("^\s*[0-9]+", x)] - logger.info("Found more than {0} GPT partitions.", len(parts)) - if len(parts) > 1: - logger.info("Remove old GPT partitions") - for i in range(1, len(parts) + 1): - logger.info("Remove partition: {0}", i) - shellutil.run("parted {0} rm {1}".format(device, i)) - - logger.info("Create a new GPT partition using entire disk space") - shellutil.run("parted {0} mkpart primary 0% 100%".format(device)) - - logger.info("Format partition: {0} with fstype {1}",partition,fs) - shellutil.run("mkfs." + fs + " " + partition + " -F") - else: - logger.info("GPT not detected") - logger.info("Check fstype") - ret = shellutil.run_get_output("sfdisk -q -c {0} 1".format(device)) - if ret[1].rstrip() == "7" and fs != "ntfs": - logger.info("The partition is formatted with ntfs") - logger.info("Format partition: {0} with fstype {1}",partition,fs) - shellutil.run("sfdisk -c {0} 1 83".format(device)) - shellutil.run("mkfs." + fs + " " + partition + " -F") - - logger.info("Mount resource disk") - ret = shellutil.run("mount {0} {1}".format(partition, mount_point), - chk_err=False) - if ret: - logger.warn("Failed to mount resource disk. Retry mounting") - shellutil.run("mkfs." 
+ fs + " " + partition + " -F") - ret = shellutil.run("mount {0} {1}".format(partition, mount_point)) - if ret: - raise ResourceDiskError("({0}) {1}".format(partition, ret)) - - logger.info("Resource disk ({0}) is mounted at {1} with fstype {2}", - device, mount_point, fs) - return mount_point - - def create_swap_space(self, mount_point, size_mb): - size_kb = size_mb * 1024 - size = size_kb * 1024 - swapfile = os.path.join(mount_point, 'swapfile') - swaplist = shellutil.run_get_output("swapon -s")[1] - - if swapfile in swaplist and os.path.getsize(swapfile) == size: - logger.info("Swap already enabled") - return - - if os.path.isfile(swapfile) and os.path.getsize(swapfile) != size: - logger.info("Remove old swap file") - shellutil.run("swapoff -a", chk_err=False) - os.remove(swapfile) - - if not os.path.isfile(swapfile): - logger.info("Create swap file") - shellutil.run(("dd if=/dev/zero of={0} bs=1024 " - "count={1}").format(swapfile, size_kb)) - shellutil.run("mkswap {0}".format(swapfile)) - if shellutil.run("swapon {0}".format(swapfile)): - raise ResourceDiskError("{0}".format(swapfile)) - logger.info("Enabled {0}KB of swap at {1}".format(size_kb, swapfile)) - diff --git a/azurelinuxagent/distro/default/scvmm.py b/azurelinuxagent/distro/default/scvmm.py deleted file mode 100644 index 4d083b4..0000000 --- a/azurelinuxagent/distro/default/scvmm.py +++ /dev/null @@ -1,48 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import subprocess -import azurelinuxagent.logger as logger - -VMM_CONF_FILE_NAME = "linuxosconfiguration.xml" -VMM_STARTUP_SCRIPT_NAME= "install" - -class ScvmmHandler(object): - def __init__(self, distro): - self.distro = distro - - def detect_scvmm_env(self): - logger.info("Detecting Microsoft System Center VMM Environment") - self.distro.osutil.mount_dvd(max_retry=1, chk_err=False) - mount_point = self.distro.osutil.get_dvd_mount_point() - found = os.path.isfile(os.path.join(mount_point, VMM_CONF_FILE_NAME)) - if found: - self.start_scvmm_agent() - else: - self.distro.osutil.umount_dvd(chk_err=False) - return found - - def start_scvmm_agent(self): - logger.info("Starting Microsoft System Center VMM Initialization " - "Process") - mount_point = self.distro.osutil.get_dvd_mount_point() - startup_script = os.path.join(mount_point, VMM_STARTUP_SCRIPT_NAME) - subprocess.Popen(["/bin/bash", startup_script, "-p " + mount_point]) - diff --git a/azurelinuxagent/distro/loader.py b/azurelinuxagent/distro/loader.py deleted file mode 100644 index 74ea9e7..0000000 --- a/azurelinuxagent/distro/loader.py +++ /dev/null @@ -1,67 +0,0 @@ -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import azurelinuxagent.logger as logger -from azurelinuxagent.utils.textutil import Version -from azurelinuxagent.metadata import DISTRO_NAME, DISTRO_VERSION, \ - DISTRO_FULL_NAME -from azurelinuxagent.distro.default.distro import DefaultDistro -from azurelinuxagent.distro.ubuntu.distro import UbuntuDistro, \ - Ubuntu14Distro, \ - Ubuntu12Distro, \ - UbuntuSnappyDistro -from azurelinuxagent.distro.redhat.distro import RedhatDistro, Redhat6xDistro -from azurelinuxagent.distro.coreos.distro import CoreOSDistro -from azurelinuxagent.distro.suse.distro import SUSE11Distro, SUSEDistro -from azurelinuxagent.distro.debian.distro import DebianDistro - -def get_distro(distro_name=DISTRO_NAME, distro_version=DISTRO_VERSION, - distro_full_name=DISTRO_FULL_NAME): - if distro_name == "ubuntu": - if Version(distro_version) == Version("12.04") or \ - Version(distro_version) == Version("12.10"): - return Ubuntu12Distro() - elif Version(distro_version) == Version("14.04") or \ - Version(distro_version) == Version("14.10"): - return Ubuntu14Distro() - elif distro_full_name == "Snappy Ubuntu Core": - return UbuntuSnappyDistro() - else: - return UbuntuDistro() - if distro_name == "coreos": - return CoreOSDistro() - if distro_name == "suse": - if distro_full_name=='SUSE Linux Enterprise Server' and \ - Version(distro_version) < Version('12') or \ - distro_full_name == 'openSUSE' and \ - Version(distro_version) < Version('13.2'): - return SUSE11Distro() - else: - return SUSEDistro() - elif distro_name == "debian": - return DebianDistro() - elif distro_name == "redhat" or distro_name == "centos" or \ - distro_name == "oracle": - if Version(distro_version) < Version("7"): - return Redhat6xDistro() - else: - return RedhatDistro() - else: - logger.warn("Unable to load distro implemetation for {0}.", distro_name) - logger.warn("Use default distro implemetation instead.") - return DefaultDistro() - diff --git a/azurelinuxagent/distro/redhat/__init__.py b/azurelinuxagent/distro/redhat/__init__.py deleted file mode 100644 index d9b82f5..0000000 --- a/azurelinuxagent/distro/redhat/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
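The SUSE branch of get_distro above relies on Python's operator precedence: 'and' binds tighter than 'or', so SUSE11Distro is chosen for SLES older than 12 or openSUSE older than 13.2. A standalone sketch of that grouping, using distutils' LooseVersion as a stand-in for the agent's own Version helper (function name invented for the example):

from distutils.version import LooseVersion as V  # stand-in for textutil.Version

def is_suse11_family(full_name, version):
    # Groups as: (SLES and < 12) or (openSUSE and < 13.2)
    return ((full_name == 'SUSE Linux Enterprise Server' and V(version) < V('12'))
            or (full_name == 'openSUSE' and V(version) < V('13.2')))

assert is_suse11_family('SUSE Linux Enterprise Server', '11.4')
assert not is_suse11_family('openSUSE', '13.2')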
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - diff --git a/azurelinuxagent/distro/redhat/distro.py b/azurelinuxagent/distro/redhat/distro.py deleted file mode 100644 index 2f128d7..0000000 --- a/azurelinuxagent/distro/redhat/distro.py +++ /dev/null @@ -1,32 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from azurelinuxagent.distro.default.distro import DefaultDistro -from azurelinuxagent.distro.redhat.osutil import RedhatOSUtil, Redhat6xOSUtil -from azurelinuxagent.distro.coreos.deprovision import CoreOSDeprovisionHandler - -class Redhat6xDistro(DefaultDistro): - def __init__(self): - super(Redhat6xDistro, self).__init__() - self.osutil = Redhat6xOSUtil() - -class RedhatDistro(DefaultDistro): - def __init__(self): - super(RedhatDistro, self).__init__() - self.osutil = RedhatOSUtil() diff --git a/azurelinuxagent/distro/redhat/osutil.py b/azurelinuxagent/distro/redhat/osutil.py deleted file mode 100644 index 7f769a5..0000000 --- a/azurelinuxagent/distro/redhat/osutil.py +++ /dev/null @@ -1,115 +0,0 @@ -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import re -import pwd -import shutil -import socket -import array -import struct -import fcntl -import time -import base64 -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -from azurelinuxagent.future import ustr, bytebuffer -from azurelinuxagent.exception import OSUtilError, CryptError -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.textutil as textutil -from azurelinuxagent.utils.cryptutil import CryptUtil -from azurelinuxagent.distro.default.osutil import DefaultOSUtil - -class Redhat6xOSUtil(DefaultOSUtil): - def __init__(self): - super(Redhat6xOSUtil, self).__init__() - - def start_network(self): - return shellutil.run("/sbin/service networking start", chk_err=False) - - def restart_ssh_service(self): - return shellutil.run("/sbin/service sshd condrestart", chk_err=False) - - def stop_agent_service(self): - return shellutil.run("/sbin/service waagent stop", chk_err=False) - - def start_agent_service(self): - return shellutil.run("/sbin/service waagent start", chk_err=False) - - def register_agent_service(self): - return shellutil.run("chkconfig --add waagent", chk_err=False) - - def unregister_agent_service(self): - return shellutil.run("chkconfig --del waagent", chk_err=False) - - def openssl_to_openssh(self, input_file, output_file): - pubkey = fileutil.read_file(input_file) - try: - cryptutil = CryptUtil(conf.get_openssl_cmd()) - ssh_rsa_pubkey = cryptutil.asn1_to_ssh(pubkey) - except CryptError as e: - raise OSUtilError(ustr(e)) - fileutil.write_file(output_file, ssh_rsa_pubkey) - - #Override - def get_dhcp_pid(self): - ret= shellutil.run_get_output("pidof dhclient") - return ret[1] if ret[0] == 0 else None - - def set_hostname(self, hostname): - """ - Set /etc/sysconfig/network - """ - fileutil.update_conf_file('/etc/sysconfig/network', - 'HOSTNAME', - 'HOSTNAME={0}'.format(hostname)) - shellutil.run("hostname {0}".format(hostname), chk_err=False) - - def set_dhcp_hostname(self, hostname): - ifname = self.get_if_name() - filepath = "/etc/sysconfig/network-scripts/ifcfg-{0}".format(ifname) - fileutil.update_conf_file(filepath, 'DHCP_HOSTNAME', - 'DHCP_HOSTNAME={0}'.format(hostname)) - -class RedhatOSUtil(Redhat6xOSUtil): - def __init__(self): - super(RedhatOSUtil, self).__init__() - - def set_hostname(self, hostname): - """ - Set /etc/hostname - Unlike redhat 6.x, redhat 7.x will set hostname to /etc/hostname - """ - DefaultOSUtil.set_hostname(self, hostname) - - def publish_hostname(self, hostname): - """ - Restart NetworkManager first before publishing hostname - """ - shellutil.run("service NetworkManager restart") - super(RedhatOSUtil, self).publish_hostname(hostname) - - def register_agent_service(self): - return shellutil.run("systemctl enable waagent", chk_err=False) - - def unregister_agent_service(self): - return shellutil.run("systemctl disable waagent", chk_err=False) - - def openssl_to_openssh(self, input_file, output_file): - DefaultOSUtil.openssl_to_openssh(self, input_file, output_file) diff --git a/azurelinuxagent/distro/suse/__init__.py b/azurelinuxagent/distro/suse/__init__.py index d9b82f5..1ea2f38 100644 --- a/azurelinuxagent/distro/suse/__init__.py +++ b/azurelinuxagent/distro/suse/__init__.py @@ -1,5 +1,3 @@ -# Microsoft Azure Linux Agent -# # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git 
a/azurelinuxagent/distro/suse/distro.py b/azurelinuxagent/distro/suse/distro.py deleted file mode 100644 index 5b39369..0000000 --- a/azurelinuxagent/distro/suse/distro.py +++ /dev/null @@ -1,32 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from azurelinuxagent.distro.default.distro import DefaultDistro -from azurelinuxagent.distro.suse.osutil import SUSE11OSUtil, SUSEOSUtil - -class SUSE11Distro(DefaultDistro): - def __init__(self): - super(SUSE11Distro, self).__init__() - self.osutil = SUSE11OSUtil() - -class SUSEDistro(DefaultDistro): - def __init__(self): - super(SUSEDistro, self).__init__() - self.osutil = SUSEOSUtil() - diff --git a/azurelinuxagent/distro/suse/osutil.py b/azurelinuxagent/distro/suse/osutil.py deleted file mode 100644 index 8d6f5bf..0000000 --- a/azurelinuxagent/distro/suse/osutil.py +++ /dev/null @@ -1,108 +0,0 @@ -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
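RedhatOSUtil and the SUSE utilities in this patch differ from their parents mainly in which service manager they drive. A compressed illustration of that override pattern; the class names below are invented for the sketch, while the real classes are the osutil modules being removed and re-added here:

import subprocess

class SysVInitOSUtil(object):
    # SysV-style control, in the spirit of Redhat6xOSUtil / SUSE11OSUtil.
    def start_agent_service(self):
        return subprocess.call(["/sbin/service", "waagent", "start"])

    def register_agent_service(self):
        return subprocess.call(["chkconfig", "--add", "waagent"])

class SystemdOSUtil(SysVInitOSUtil):
    # systemd distros override only the commands that change.
    def start_agent_service(self):
        return subprocess.call(["systemctl", "start", "waagent"])

    def register_agent_service(self):
        return subprocess.call(["systemctl", "enable", "waagent"])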
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import re -import pwd -import shutil -import socket -import array -import struct -import fcntl -import time -import azurelinuxagent.logger as logger -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.textutil as textutil -from azurelinuxagent.metadata import DISTRO_NAME, DISTRO_VERSION, DISTRO_FULL_NAME -from azurelinuxagent.distro.default.osutil import DefaultOSUtil - -class SUSE11OSUtil(DefaultOSUtil): - def __init__(self): - super(SUSE11OSUtil, self).__init__() - self.dhclient_name='dhcpcd' - - def set_hostname(self, hostname): - fileutil.write_file('/etc/HOSTNAME', hostname) - shellutil.run("hostname {0}".format(hostname), chk_err=False) - - def get_dhcp_pid(self): - ret= shellutil.run_get_output("pidof {0}".format(self.dhclient_name)) - return ret[1] if ret[0] == 0 else None - - def is_dhcp_enabled(self): - return True - - def stop_dhcp_service(self): - cmd = "/sbin/service {0} stop".format(self.dhclient_name) - return shellutil.run(cmd, chk_err=False) - - def start_dhcp_service(self): - cmd = "/sbin/service {0} start".format(self.dhclient_name) - return shellutil.run(cmd, chk_err=False) - - def start_network(self) : - return shellutil.run("/sbin/service start network", chk_err=False) - - def restart_ssh_service(self): - return shellutil.run("/sbin/service sshd restart", chk_err=False) - - def stop_agent_service(self): - return shellutil.run("/sbin/service waagent stop", chk_err=False) - - def start_agent_service(self): - return shellutil.run("/sbin/service waagent start", chk_err=False) - - def register_agent_service(self): - return shellutil.run("/sbin/insserv waagent", chk_err=False) - - def unregister_agent_service(self): - return shellutil.run("/sbin/insserv -r waagent", chk_err=False) - -class SUSEOSUtil(SUSE11OSUtil): - def __init__(self): - super(SUSEOSUtil, self).__init__() - self.dhclient_name = 'wickedd-dhcp4' - - def stop_dhcp_service(self): - cmd = "systemctl stop {0}".format(self.dhclient_name) - return shellutil.run(cmd, chk_err=False) - - def start_dhcp_service(self): - cmd = "systemctl start {0}".format(self.dhclient_name) - return shellutil.run(cmd, chk_err=False) - - def start_network(self) : - return shellutil.run("systemctl start network", chk_err=False) - - def restart_ssh_service(self): - return shellutil.run("systemctl restart sshd", chk_err=False) - - def stop_agent_service(self): - return shellutil.run("systemctl stop waagent", chk_err=False) - - def start_agent_service(self): - return shellutil.run("systemctl start waagent", chk_err=False) - - def register_agent_service(self): - return shellutil.run("systemctl enable waagent", chk_err=False) - - def unregister_agent_service(self): - return shellutil.run("systemctl disable waagent", chk_err=False) - - diff --git a/azurelinuxagent/distro/ubuntu/__init__.py b/azurelinuxagent/distro/ubuntu/__init__.py deleted file mode 100644 index d9b82f5..0000000 --- a/azurelinuxagent/distro/ubuntu/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - diff --git a/azurelinuxagent/distro/ubuntu/deprovision.py b/azurelinuxagent/distro/ubuntu/deprovision.py deleted file mode 100644 index da6e834..0000000 --- a/azurelinuxagent/distro/ubuntu/deprovision.py +++ /dev/null @@ -1,46 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import azurelinuxagent.logger as logger -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.distro.default.deprovision import DeprovisionHandler, DeprovisionAction - -def del_resolv(): - if os.path.realpath('/etc/resolv.conf') != '/run/resolvconf/resolv.conf': - logger.info("resolvconf is not configured. Removing /etc/resolv.conf") - fileutil.rm_files('/etc/resolv.conf') - else: - logger.info("resolvconf is enabled; leaving /etc/resolv.conf intact") - fileutil.rm_files('/etc/resolvconf/resolv.conf.d/tail', - '/etc/resolvconf/resolv.conf.d/originial') - - -class UbuntuDeprovisionHandler(DeprovisionHandler): - def __init__(self, distro): - super(UbuntuDeprovisionHandler, self).__init__(distro) - - def setup(self, deluser): - warnings, actions = super(UbuntuDeprovisionHandler, self).setup(deluser) - warnings.append("WARNING! Nameserver configuration in " - "/etc/resolvconf/resolv.conf.d/{tail,originial} " - "will be deleted.") - actions.append(DeprovisionAction(del_resolv)) - return warnings, actions - diff --git a/azurelinuxagent/distro/ubuntu/distro.py b/azurelinuxagent/distro/ubuntu/distro.py deleted file mode 100644 index f380f6c..0000000 --- a/azurelinuxagent/distro/ubuntu/distro.py +++ /dev/null @@ -1,55 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
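The Ubuntu deprovision helper above only removes /etc/resolv.conf when resolvconf is not managing it, which it detects by resolving the symlink. A standalone version of that check, assuming the resolvconf-era Ubuntu path layout (the helper name is illustrative):

import os

def resolvconf_manages_dns():
    # With resolvconf enabled, /etc/resolv.conf is a symlink into
    # /run/resolvconf; realpath follows the link, so a plain file returns False.
    return os.path.realpath('/etc/resolv.conf') == '/run/resolvconf/resolv.conf'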
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -from azurelinuxagent.distro.default.distro import DefaultDistro -from azurelinuxagent.distro.ubuntu.osutil import Ubuntu14OSUtil, \ - Ubuntu12OSUtil, \ - UbuntuOSUtil, \ - UbuntuSnappyOSUtil - -from azurelinuxagent.distro.ubuntu.provision import UbuntuProvisionHandler -from azurelinuxagent.distro.ubuntu.deprovision import UbuntuDeprovisionHandler - -class UbuntuDistro(DefaultDistro): - def __init__(self): - super(UbuntuDistro, self).__init__() - self.osutil = UbuntuOSUtil() - self.provision_handler = UbuntuProvisionHandler(self) - self.deprovision_handler = UbuntuDeprovisionHandler(self) - -class Ubuntu12Distro(DefaultDistro): - def __init__(self): - super(Ubuntu12Distro, self).__init__() - self.osutil = Ubuntu12OSUtil() - self.provision_handler = UbuntuProvisionHandler(self) - self.deprovision_handler = UbuntuDeprovisionHandler(self) - -class Ubuntu14Distro(DefaultDistro): - def __init__(self): - super(Ubuntu14Distro, self).__init__() - self.osutil = Ubuntu14OSUtil() - self.provision_handler = UbuntuProvisionHandler(self) - self.deprovision_handler = UbuntuDeprovisionHandler(self) - -class UbuntuSnappyDistro(DefaultDistro): - def __init__(self): - super(UbuntuSnappyDistro, self).__init__() - self.osutil = UbuntuSnappyOSUtil() - self.provision_handler = UbuntuProvisionHandler(self) - self.deprovision_handler = UbuntuDeprovisionHandler(self) diff --git a/azurelinuxagent/distro/ubuntu/osutil.py b/azurelinuxagent/distro/ubuntu/osutil.py deleted file mode 100644 index cc4b8ef..0000000 --- a/azurelinuxagent/distro/ubuntu/osutil.py +++ /dev/null @@ -1,75 +0,0 @@ -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import re -import pwd -import shutil -import socket -import array -import struct -import fcntl -import time -import azurelinuxagent.logger as logger -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.textutil as textutil -from azurelinuxagent.distro.default.osutil import DefaultOSUtil - -class Ubuntu14OSUtil(DefaultOSUtil): - def __init__(self): - super(Ubuntu14OSUtil, self).__init__() - - def start_network(self): - return shellutil.run("service networking start", chk_err=False) - - def stop_agent_service(self): - return shellutil.run("service walinuxagent stop", chk_err=False) - - def start_agent_service(self): - return shellutil.run("service walinuxagent start", chk_err=False) - -class Ubuntu12OSUtil(Ubuntu14OSUtil): - def __init__(self): - super(Ubuntu12OSUtil, self).__init__() - - #Override - def get_dhcp_pid(self): - ret= shellutil.run_get_output("pidof dhclient3") - return ret[1] if ret[0] == 0 else None - -class UbuntuOSUtil(Ubuntu14OSUtil): - def __init__(self): - super(UbuntuOSUtil, self).__init__() - - def register_agent_service(self): - return shellutil.run("systemctl unmask walinuxagent", chk_err=False) - - def unregister_agent_service(self): - return shellutil.run("systemctl mask walinuxagent", chk_err=False) - -class UbuntuSnappyOSUtil(Ubuntu14OSUtil): - def __init__(self): - super(UbuntuSnappyOSUtil, self).__init__() - self.conf_file_path = '/apps/walinuxagent/current/waagent.conf' - - def remove_rules_files(self, rules_files=""): - pass - - def restore_rules_files(self, rules_files=""): - pass diff --git a/azurelinuxagent/distro/ubuntu/provision.py b/azurelinuxagent/distro/ubuntu/provision.py deleted file mode 100644 index 330e057..0000000 --- a/azurelinuxagent/distro/ubuntu/provision.py +++ /dev/null @@ -1,98 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import time -import azurelinuxagent.logger as logger -from azurelinuxagent.future import ustr -import azurelinuxagent.conf as conf -import azurelinuxagent.protocol.ovfenv as ovfenv -from azurelinuxagent.event import add_event, WALAEventOperation -from azurelinuxagent.exception import ProvisionError, ProtocolError -import azurelinuxagent.utils.shellutil as shellutil -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.distro.default.provision import ProvisionHandler - -""" -On ubuntu image, provision could be disabled. 
-""" -class UbuntuProvisionHandler(ProvisionHandler): - def __init__(self, distro): - self.distro = distro - - def run(self): - #If provision is enabled, run default provision handler - if conf.get_provision_enabled(): - super(UbuntuProvisionHandler, self).run() - return - - logger.info("run Ubuntu provision handler") - provisioned = os.path.join(conf.get_lib_dir(), "provisioned") - if os.path.isfile(provisioned): - return - - logger.info("Waiting cloud-init to copy ovf-env.xml.") - self.wait_for_ovfenv() - - protocol = self.distro.protocol_util.detect_protocol() - self.report_not_ready("Provisioning", "Starting") - logger.info("Sleep 15 seconds to prevent throttling") - time.sleep(15) #Sleep to prevent throttling - try: - logger.info("Wait for ssh host key to be generated.") - thumbprint = self.wait_for_ssh_host_key() - fileutil.write_file(provisioned, "") - logger.info("Finished provisioning") - - except ProvisionError as e: - logger.error("Provision failed: {0}", e) - self.report_not_ready("ProvisioningFailed", ustr(e)) - self.report_event(ustr(e)) - return - - self.report_ready(thumbprint) - self.report_event("Provision succeed", is_success=True) - - def wait_for_ovfenv(self, max_retry=60): - """ - Wait for cloud-init to copy ovf-env.xml file from provision ISO - """ - for retry in range(0, max_retry): - try: - self.distro.protocol_util.get_ovf_env() - return - except ProtocolError: - if retry < max_retry - 1: - logger.info("Wait for cloud-init to copy ovf-env.xml") - time.sleep(5) - raise ProvisionError("ovf-env.xml is not copied") - - def wait_for_ssh_host_key(self, max_retry=60): - """ - Wait for cloud-init to generate ssh host key - """ - kepair_type = conf.get_ssh_host_keypair_type() - path = '/etc/ssh/ssh_host_{0}_key'.format(kepair_type) - for retry in range(0, max_retry): - if os.path.isfile(path): - return self.get_ssh_host_key_thumbprint(kepair_type) - if retry < max_retry - 1: - logger.info("Wait for ssh host key be generated: {0}", path) - time.sleep(5) - raise ProvisionError("Ssh hsot key is not generated.") diff --git a/azurelinuxagent/event.py b/azurelinuxagent/event.py deleted file mode 100644 index f38b242..0000000 --- a/azurelinuxagent/event.py +++ /dev/null @@ -1,124 +0,0 @@ -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import sys -import traceback -import atexit -import json -import time -import datetime -import threading -import platform -import azurelinuxagent.logger as logger -from azurelinuxagent.exception import EventError, ProtocolError -from azurelinuxagent.future import ustr -from azurelinuxagent.protocol.restapi import TelemetryEventParam, \ - TelemetryEventList, \ - TelemetryEvent, \ - set_properties, get_properties -from azurelinuxagent.metadata import DISTRO_NAME, DISTRO_VERSION, \ - DISTRO_CODE_NAME, AGENT_VERSION - - -class WALAEventOperation: - HeartBeat="HeartBeat" - Provision = "Provision" - Install = "Install" - UnInstall = "UnInstall" - Disable = "Disable" - Enable = "Enable" - Download = "Download" - Upgrade = "Upgrade" - Update = "Update" - ActivateResourceDisk="ActivateResourceDisk" - UnhandledError="UnhandledError" - -class EventLogger(object): - def __init__(self): - self.event_dir = None - - def save_event(self, data): - if self.event_dir is None: - logger.warn("Event reporter is not initialized.") - return - - if not os.path.exists(self.event_dir): - os.mkdir(self.event_dir) - os.chmod(self.event_dir, 0o700) - if len(os.listdir(self.event_dir)) > 1000: - raise EventError("Too many files under: {0}".format(self.event_dir)) - - filename = os.path.join(self.event_dir, ustr(int(time.time()*1000000))) - try: - with open(filename+".tmp",'wb+') as hfile: - hfile.write(data.encode("utf-8")) - os.rename(filename+".tmp", filename+".tld") - except IOError as e: - raise EventError("Failed to write events to file:{0}", e) - - def add_event(self, name, op="", is_success=True, duration=0, version="1.0", - message="", evt_type="", is_internal=False): - event = TelemetryEvent(1, "69B669B9-4AF8-4C50-BDC4-6006FA76E975") - event.parameters.append(TelemetryEventParam('Name', name)) - event.parameters.append(TelemetryEventParam('Version', version)) - event.parameters.append(TelemetryEventParam('IsInternal', is_internal)) - event.parameters.append(TelemetryEventParam('Operation', op)) - event.parameters.append(TelemetryEventParam('OperationSuccess', - is_success)) - event.parameters.append(TelemetryEventParam('Message', message)) - event.parameters.append(TelemetryEventParam('Duration', duration)) - event.parameters.append(TelemetryEventParam('ExtensionType', evt_type)) - - data = get_properties(event) - try: - self.save_event(json.dumps(data)) - except EventError as e: - logger.error("{0}", e) - -__event_logger__ = EventLogger() - -def add_event(name, op="", is_success=True, duration=0, version="1.0", - message="", evt_type="", is_internal=False, - reporter=__event_logger__): - log = logger.info if is_success else logger.error - log("Event: name={0}, op={1}, message={2}", name, op, message) - - if reporter.event_dir is None: - logger.warn("Event reporter is not initialized.") - return - reporter.add_event(name, op=op, is_success=is_success, duration=duration, - version=version, message=message, evt_type=evt_type, - is_internal=is_internal) - -def init_event_logger(event_dir, reporter=__event_logger__): - reporter.event_dir = event_dir - -def dump_unhandled_err(name): - if hasattr(sys, 'last_type') and hasattr(sys, 'last_value') and \ - hasattr(sys, 'last_traceback'): - last_type = getattr(sys, 'last_type') - last_value = getattr(sys, 'last_value') - last_traceback = getattr(sys, 'last_traceback') - error = traceback.format_exception(last_type, last_value, - last_traceback) - message= "".join(error) - add_event(name, is_success=False, 
message=message, - op=WALAEventOperation.UnhandledError) - -def enable_unhandled_err_dump(name): - atexit.register(dump_unhandled_err, name) diff --git a/azurelinuxagent/exception.py b/azurelinuxagent/exception.py deleted file mode 100644 index 7fa5cff..0000000 --- a/azurelinuxagent/exception.py +++ /dev/null @@ -1,115 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# -""" -Defines all exceptions -""" - -class AgentError(Exception): - """ - Base class of agent error. - """ - def __init__(self, errno, msg, inner=None): - msg = u"({0}){1}".format(errno, msg) - if inner is not None: - msg = u"{0} \n inner error: {1}".format(msg, inner) - super(AgentError, self).__init__(msg) - -class AgentConfigError(AgentError): - """ - When configure file is not found or malformed. - """ - def __init__(self, msg=None, inner=None): - super(AgentConfigError, self).__init__('000001', msg, inner) - -class AgentNetworkError(AgentError): - """ - When network is not avaiable. - """ - def __init__(self, msg=None, inner=None): - super(AgentNetworkError, self).__init__('000002', msg, inner) - -class ExtensionError(AgentError): - """ - When failed to execute an extension - """ - def __init__(self, msg=None, inner=None): - super(ExtensionError, self).__init__('000003', msg, inner) - -class ProvisionError(AgentError): - """ - When provision failed - """ - def __init__(self, msg=None, inner=None): - super(ProvisionError, self).__init__('000004', msg, inner) - -class ResourceDiskError(AgentError): - """ - Mount resource disk failed - """ - def __init__(self, msg=None, inner=None): - super(ResourceDiskError, self).__init__('000005', msg, inner) - -class DhcpError(AgentError): - """ - Failed to handle dhcp response - """ - def __init__(self, msg=None, inner=None): - super(DhcpError, self).__init__('000006', msg, inner) - -class OSUtilError(AgentError): - """ - Failed to perform operation to OS configuration - """ - def __init__(self, msg=None, inner=None): - super(OSUtilError, self).__init__('000007', msg, inner) - -class ProtocolError(AgentError): - """ - Azure protocol error - """ - def __init__(self, msg=None, inner=None): - super(ProtocolError, self).__init__('000008', msg, inner) - -class ProtocolNotFoundError(ProtocolError): - """ - Azure protocol endpoint not found - """ - def __init__(self, msg=None, inner=None): - super(ProtocolNotFoundError, self).__init__(msg, inner) - -class HttpError(AgentError): - """ - Http request failure - """ - def __init__(self, msg=None, inner=None): - super(HttpError, self).__init__('000009', msg, inner) - -class EventError(AgentError): - """ - Event reporting error - """ - def __init__(self, msg=None, inner=None): - super(EventError, self).__init__('000010', msg, inner) - -class CryptError(AgentError): - """ - Encrypt/Decrypt error - """ - def __init__(self, msg=None, inner=None): - super(CryptError, self).__init__('000011', msg, inner) diff --git 
a/azurelinuxagent/future.py b/azurelinuxagent/future.py deleted file mode 100644 index 8509732..0000000 --- a/azurelinuxagent/future.py +++ /dev/null @@ -1,31 +0,0 @@ -import sys - -""" -Add alies for python2 and python3 libs and fucntions. -""" - -if sys.version_info[0]== 3: - import http.client as httpclient - from urllib.parse import urlparse - - """Rename Python3 str to ustr""" - ustr = str - - bytebuffer = memoryview - - read_input = input - -elif sys.version_info[0] == 2: - import httplib as httpclient - from urlparse import urlparse - - """Rename Python2 unicode to ustr""" - ustr = unicode - - bytebuffer = buffer - - read_input = raw_input - -else: - raise ImportError("Unknown python version:{0}".format(sys.version_info)) - diff --git a/azurelinuxagent/ga/__init__.py b/azurelinuxagent/ga/__init__.py new file mode 100644 index 0000000..1ea2f38 --- /dev/null +++ b/azurelinuxagent/ga/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + diff --git a/azurelinuxagent/ga/env.py b/azurelinuxagent/ga/env.py new file mode 100644 index 0000000..2d67d4b --- /dev/null +++ b/azurelinuxagent/ga/env.py @@ -0,0 +1,112 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import socket +import time +import threading + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger + +from azurelinuxagent.common.dhcp import get_dhcp_handler +from azurelinuxagent.common.osutil import get_osutil + +def get_env_handler(): + return EnvHandler() + +class EnvHandler(object): + """ + Monitor changes to dhcp and hostname. + If dhcp clinet process re-start has occurred, reset routes, dhcp with fabric. + + Monitor scsi disk. 
+ If new scsi disk found, set timeout + """ + def __init__(self): + self.osutil = get_osutil() + self.dhcp_handler = get_dhcp_handler() + self.stopped = True + self.hostname = None + self.dhcpid = None + self.server_thread=None + + def run(self): + if not self.stopped: + logger.info("Stop existing env monitor service.") + self.stop() + + self.stopped = False + logger.info("Start env monitor service.") + self.dhcp_handler.conf_routes() + self.hostname = socket.gethostname() + self.dhcpid = self.osutil.get_dhcp_pid() + self.server_thread = threading.Thread(target = self.monitor) + self.server_thread.setDaemon(True) + self.server_thread.start() + + def monitor(self): + """ + Monitor dhcp client pid and hostname. + If dhcp clinet process re-start has occurred, reset routes. + """ + while not self.stopped: + self.osutil.remove_rules_files() + timeout = conf.get_root_device_scsi_timeout() + if timeout is not None: + self.osutil.set_scsi_disks_timeout(timeout) + if conf.get_monitor_hostname(): + self.handle_hostname_update() + self.handle_dhclient_restart() + time.sleep(5) + + def handle_hostname_update(self): + curr_hostname = socket.gethostname() + if curr_hostname != self.hostname: + logger.info("EnvMonitor: Detected host name change: {0} -> {1}", + self.hostname, curr_hostname) + self.osutil.set_hostname(curr_hostname) + self.osutil.publish_hostname(curr_hostname) + self.hostname = curr_hostname + + def handle_dhclient_restart(self): + if self.dhcpid is None: + logger.warn("Dhcp client is not running. ") + self.dhcpid = self.osutil.get_dhcp_pid() + return + + #The dhcp process hasn't changed since last check + if self.osutil.check_pid_alive(self.dhcpid.strip()): + return + + newpid = self.osutil.get_dhcp_pid() + if newpid is not None and newpid != self.dhcpid: + logger.info("EnvMonitor: Detected dhcp client restart. " + "Restoring routing table.") + self.dhcp_handler.conf_routes() + self.dhcpid = newpid + + def stop(self): + """ + Stop server comminucation and join the thread to main thread. + """ + self.stopped = True + if self.server_thread is not None: + self.server_thread.join() + diff --git a/azurelinuxagent/ga/exthandlers.py b/azurelinuxagent/ga/exthandlers.py new file mode 100644 index 0000000..d3c8f32 --- /dev/null +++ b/azurelinuxagent/ga/exthandlers.py @@ -0,0 +1,902 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
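The EnvHandler monitor loop above re-applies routes when the DHCP client restarts, which it detects by remembering the client's pid and checking whether that pid is still alive. A rough standalone version of that check, with simplified pid handling (the agent goes through its osutil layer instead, and the function name is invented):

import os
import subprocess

def dhcp_client_restarted(last_pid, client="dhclient"):
    # Returns (restarted, current_pid).
    if last_pid and os.path.isdir("/proc/" + last_pid.strip()):
        return False, last_pid          # remembered process is still running
    try:
        new_pid = subprocess.check_output(["pidof", client]).decode().strip()
    except subprocess.CalledProcessError:
        return False, None              # client not running at all
    return new_pid != last_pid, new_pid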
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import glob +import json +import os +import shutil +import subprocess +import time +import zipfile + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.restutil as restutil +import azurelinuxagent.common.utils.shellutil as shellutil + +from azurelinuxagent.common.event import add_event, WALAEventOperation +from azurelinuxagent.common.exception import ExtensionError, ProtocolError, HttpError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.version import AGENT_VERSION +from azurelinuxagent.common.protocol.restapi import ExtHandlerStatus, \ + ExtensionStatus, \ + ExtensionSubStatus, \ + Extension, \ + VMStatus, ExtHandler, \ + get_properties, \ + set_properties +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion +from azurelinuxagent.common.utils.textutil import Version +from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.common.version import AGENT_NAME, CURRENT_AGENT, CURRENT_VERSION + +#HandlerEnvironment.json schema version +HANDLER_ENVIRONMENT_VERSION = 1.0 + +VALID_EXTENSION_STATUS = ['transitioning', 'error', 'success', 'warning'] + +VALID_HANDLER_STATUS = ['Ready', 'NotReady', "Installing", "Unresponsive"] + +def validate_has_key(obj, key, fullname): + if key not in obj: + raise ExtensionError("Missing: {0}".format(fullname)) + +def validate_in_range(val, valid_range, name): + if val not in valid_range: + raise ExtensionError("Invalid {0}: {1}".format(name, val)) + +def parse_formatted_message(formatted_message): + if formatted_message is None: + return None + validate_has_key(formatted_message, 'lang', 'formattedMessage/lang') + validate_has_key(formatted_message, 'message', 'formattedMessage/message') + return formatted_message.get('message') + +def parse_ext_substatus(substatus): + #Check extension sub status format + validate_has_key(substatus, 'status', 'substatus/status') + validate_in_range(substatus['status'], VALID_EXTENSION_STATUS, + 'substatus/status') + status = ExtensionSubStatus() + status.name = substatus.get('name') + status.status = substatus.get('status') + status.code = substatus.get('code', 0) + formatted_message = substatus.get('formattedMessage') + status.message = parse_formatted_message(formatted_message) + return status + +def parse_ext_status(ext_status, data): + if data is None or len(data) is None: + return + #Currently, only the first status will be reported + data = data[0] + #Check extension status format + validate_has_key(data, 'status', 'status') + status_data = data['status'] + validate_has_key(status_data, 'status', 'status/status') + + validate_in_range(status_data['status'], VALID_EXTENSION_STATUS, + 'status/status') + + applied_time = status_data.get('configurationAppliedTime') + ext_status.configurationAppliedTime = applied_time + ext_status.operation = status_data.get('operation') + ext_status.status = status_data.get('status') + ext_status.code = status_data.get('code', 0) + formatted_message = status_data.get('formattedMessage') + ext_status.message = parse_formatted_message(formatted_message) + substatus_list = status_data.get('substatus') + if substatus_list is None: + return + for substatus in substatus_list: + ext_status.substatusList.append(parse_ext_substatus(substatus)) + +class ExtHandlerState(object): + NotInstalled = "NotInstalled" + Installed = "Installed" + Enabled 
= "Enabled" + +def get_exthandlers_handler(): + return ExtHandlersHandler() + +class ExtHandlersHandler(object): + def __init__(self): + self.protocol_util = get_protocol_util() + self.ext_handlers = None + self.last_etag = None + self.log_report = False + + def run(self): + self.ext_handlers, etag = None, None + try: + self.protocol = self.protocol_util.get_protocol() + self.ext_handlers, etag = self.protocol.get_ext_handlers() + except ProtocolError as e: + msg = u"Exception retrieving extension handlers: {0}".format( + ustr(e)) + logger.warn(msg) + add_event(AGENT_NAME, version=CURRENT_VERSION, is_success=False, message=msg) + return + + if self.last_etag is not None and self.last_etag == etag: + msg = u"Incarnation {0} has no extension updates".format(etag) + logger.verbose(msg) + self.log_report = False + else: + msg = u"Handle extensions updates for incarnation {0}".format(etag) + logger.info(msg) + self.log_report = True #Log status report success on new config + self.handle_ext_handlers() + self.last_etag = etag + + self.report_ext_handlers_status() + + def run_status(self): + self.report_ext_handlers_status() + return + + def handle_ext_handlers(self): + if self.ext_handlers.extHandlers is None or \ + len(self.ext_handlers.extHandlers) == 0: + logger.info("No ext handler config found") + return + + for ext_handler in self.ext_handlers.extHandlers: + #TODO handle install in sequence, enable in parallel + self.handle_ext_handler(ext_handler) + + def handle_ext_handler(self, ext_handler): + ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) + try: + state = ext_handler.properties.state + ext_handler_i.logger.info("Expected handler state: {0}", state) + if state == "enabled": + self.handle_enable(ext_handler_i) + elif state == u"disabled": + self.handle_disable(ext_handler_i) + elif state == u"uninstall": + self.handle_uninstall(ext_handler_i) + else: + message = u"Unknown ext handler state:{0}".format(state) + raise ExtensionError(message) + except ExtensionError as e: + ext_handler_i.set_handler_status(message=ustr(e), code=-1) + ext_handler_i.report_event(message=ustr(e), is_success=False) + + def handle_enable(self, ext_handler_i): + + ext_handler_i.decide_version() + + old_ext_handler_i = ext_handler_i.get_installed_ext_handler() + if old_ext_handler_i is not None and \ + old_ext_handler_i.version_gt(ext_handler_i): + raise ExtensionError(u"Downgrade not allowed") + + handler_state = ext_handler_i.get_handler_state() + ext_handler_i.logger.info("Current handler state is: {0}", handler_state) + if handler_state == ExtHandlerState.NotInstalled: + ext_handler_i.set_handler_state(ExtHandlerState.NotInstalled) + + ext_handler_i.download() + + ext_handler_i.update_settings() + + if old_ext_handler_i is None: + ext_handler_i.install() + elif ext_handler_i.version_gt(old_ext_handler_i): + old_ext_handler_i.disable() + ext_handler_i.copy_status_files(old_ext_handler_i) + ext_handler_i.update() + old_ext_handler_i.uninstall() + old_ext_handler_i.rm_ext_handler_dir() + ext_handler_i.update_with_install() + else: + ext_handler_i.update_settings() + + ext_handler_i.enable() + + def handle_disable(self, ext_handler_i): + handler_state = ext_handler_i.get_handler_state() + ext_handler_i.logger.info("Current handler state is: {0}", handler_state) + if handler_state == ExtHandlerState.Enabled: + ext_handler_i.disable() + + def handle_uninstall(self, ext_handler_i): + handler_state = ext_handler_i.get_handler_state() + ext_handler_i.logger.info("Current handler state is: {0}", 
handler_state) + if handler_state != ExtHandlerState.NotInstalled: + if handler_state == ExtHandlerState.Enabled: + ext_handler_i.disable() + ext_handler_i.uninstall() + ext_handler_i.rm_ext_handler_dir() + + def report_ext_handlers_status(self): + """Go thru handler_state dir, collect and report status""" + vm_status = VMStatus() + vm_status.vmAgent.version = str(CURRENT_VERSION) + vm_status.vmAgent.status = "Ready" + vm_status.vmAgent.message = "Guest Agent is running" + + if self.ext_handlers is not None: + for ext_handler in self.ext_handlers.extHandlers: + try: + self.report_ext_handler_status(vm_status, ext_handler) + except ExtensionError as e: + add_event( + AGENT_NAME, + version=CURRENT_VERSION, + is_success=False, + message=ustr(e)) + + logger.verbose("Report vm agent status") + + try: + self.protocol.report_vm_status(vm_status) + except ProtocolError as e: + message = "Failed to report vm agent status: {0}".format(e) + add_event(AGENT_NAME, version=CURRENT_VERSION, is_success=False, message=message) + + if self.log_report: + logger.verbose("Successfully reported vm agent status") + + + def report_ext_handler_status(self, vm_status, ext_handler): + ext_handler_i = ExtHandlerInstance(ext_handler, self.protocol) + + handler_status = ext_handler_i.get_handler_status() + if handler_status is None: + return + + handler_state = ext_handler_i.get_handler_state() + if handler_state != ExtHandlerState.NotInstalled: + try: + active_exts = ext_handler_i.report_ext_status() + handler_status.extensions.extend(active_exts) + except ExtensionError as e: + ext_handler_i.set_handler_status(message=ustr(e), code=-1) + + try: + heartbeat = ext_handler_i.collect_heartbeat() + if heartbeat is not None: + handler_status.status = heartbeat.get('status') + except ExtensionError as e: + ext_handler_i.set_handler_status(message=ustr(e), code=-1) + + vm_status.vmAgent.extensionHandlers.append(handler_status) + +class ExtHandlerInstance(object): + def __init__(self, ext_handler, protocol): + self.ext_handler = ext_handler + self.protocol = protocol + self.operation = None + self.pkg = None + + prefix = "[{0}]".format(self.get_full_name()) + self.logger = logger.Logger(logger.DEFAULT_LOGGER, prefix) + + try: + fileutil.mkdir(self.get_log_dir(), mode=0o744) + except IOError as e: + self.logger.error(u"Failed to create extension log dir: {0}", e) + + log_file = os.path.join(self.get_log_dir(), "CommandExecution.log") + self.logger.add_appender(logger.AppenderType.FILE, + logger.LogLevel.INFO, log_file) + + def decide_version(self): + self.logger.info("Decide which version to use") + try: + pkg_list = self.protocol.get_ext_handler_pkgs(self.ext_handler) + except ProtocolError as e: + raise ExtensionError("Failed to get ext handler pkgs", e) + + # Determine the desired and installed versions + requested_version = FlexibleVersion(self.ext_handler.properties.version) + installed_version = FlexibleVersion(self.get_installed_version()) + if installed_version is None: + installed_version = requested_version + + # Divide packages + # - Find the installed package (its version must exactly match) + # - Find the internal candidate (its version must exactly match) + # - Separate the public packages + internal_pkg = None + installed_pkg = None + public_pkgs = [] + for pkg in pkg_list.versions: + pkg_version = FlexibleVersion(pkg.version) + if pkg_version == installed_version: + installed_pkg = pkg + if pkg.isinternal and pkg_version == requested_version: + internal_pkg = pkg + if not pkg.isinternal: + 
public_pkgs.append(pkg) + + internal_version = FlexibleVersion(internal_pkg.version) \ + if internal_pkg is not None \ + else FlexibleVersion() + public_pkgs.sort(key=lambda pkg: FlexibleVersion(pkg.version), reverse=True) + + # Determine the preferred version and type of upgrade occurring + preferred_version = max(requested_version, installed_version) + is_major_upgrade = preferred_version.major > installed_version.major + allow_minor_upgrade = self.ext_handler.properties.upgradePolicy == 'auto' + + # Find the first public candidate which + # - Matches the preferred major version + # - Does not upgrade to a new, disallowed major version + # - And only increments the minor version if allowed + # Notes: + # - The patch / hotfix version is not considered + public_pkg = None + for pkg in public_pkgs: + pkg_version = FlexibleVersion(pkg.version) + if pkg_version.major == preferred_version.major \ + and (not pkg.disallow_major_upgrade or not is_major_upgrade) \ + and (allow_minor_upgrade or pkg_version.minor == preferred_version.minor): + public_pkg = pkg + break + + # If there are no candidates, locate the highest public version whose + # major matches that installed + if internal_pkg is None and public_pkg is None: + for pkg in public_pkgs: + pkg_version = FlexibleVersion(pkg.version) + if pkg_version.major == installed_version.major: + public_pkg = pkg + break + + public_version = FlexibleVersion(public_pkg.version) \ + if public_pkg is not None \ + else FlexibleVersion() + + # Select the candidate + # - Use the public candidate if there is no internal candidate or + # the public is more recent (e.g., a hotfix patch) + # - Otherwise use the internal candidate + if internal_pkg is None or (public_pkg is not None and public_version > internal_version): + selected_pkg = public_pkg + else: + selected_pkg = internal_pkg + + selected_version = FlexibleVersion(selected_pkg.version) \ + if selected_pkg is not None \ + else FlexibleVersion() + + # Finally, update the version only if not downgrading + # Note: + # - A downgrade, which will be bound to the same major version, + # is allowed if the installed version is no longer available + if selected_pkg is None \ + or (installed_pkg is not None and selected_version < installed_version): + self.pkg = installed_pkg + self.ext_handler.properties.version = installed_version + else: + self.pkg = selected_pkg + self.ext_handler.properties.version = selected_pkg.version + + if self.pkg is None: + raise ExtensionError("Failed to find any valid extension package") + + self.logger.info("Use version: {0}", self.pkg.version) + return + + def version_gt(self, other): + self_version = self.ext_handler.properties.version + other_version = other.ext_handler.properties.version + return Version(self_version) > Version(other_version) + + def get_installed_ext_handler(self): + lastest_version = self.get_installed_version() + if lastest_version is None: + return None + + installed_handler = ExtHandler() + set_properties("ExtHandler", installed_handler, get_properties(self.ext_handler)) + installed_handler.properties.version = lastest_version + return ExtHandlerInstance(installed_handler, self.protocol) + + def get_installed_version(self): + lastest_version = None + + for path in glob.iglob(os.path.join(conf.get_lib_dir(), self.ext_handler.name + "-*")): + if not os.path.isdir(path): + continue + + separator = path.rfind('-') + version = FlexibleVersion(path[separator+1:]) + + if lastest_version is None or lastest_version < version: + lastest_version = version + + return 
str(lastest_version) if lastest_version is not None else None + + def copy_status_files(self, old_ext_handler_i): + self.logger.info("Copy status files from old plugin to new") + old_ext_dir = old_ext_handler_i.get_base_dir() + new_ext_dir = self.get_base_dir() + + old_ext_mrseq_file = os.path.join(old_ext_dir, "mrseq") + if os.path.isfile(old_ext_mrseq_file): + shutil.copy2(old_ext_mrseq_file, new_ext_dir) + + old_ext_status_dir = old_ext_handler_i.get_status_dir() + new_ext_status_dir = self.get_status_dir() + + if os.path.isdir(old_ext_status_dir): + for status_file in os.listdir(old_ext_status_dir): + status_file = os.path.join(old_ext_status_dir, status_file) + if os.path.isfile(status_file): + shutil.copy2(status_file, new_ext_status_dir) + + def set_operation(self, op): + self.operation = op + + def report_event(self, message="", is_success=True): + version = self.ext_handler.properties.version + add_event(name=self.ext_handler.name, version=version, message=message, + op=self.operation, is_success=is_success) + + def download(self): + self.logger.info("Download extension package") + self.set_operation(WALAEventOperation.Download) + if self.pkg is None: + raise ExtensionError("No package uri found") + + package = None + for uri in self.pkg.uris: + try: + package = self.protocol.download_ext_handler_pkg(uri.uri) + except ProtocolError as e: + logger.warn("Failed download extension: {0}", e) + + if package is None: + raise ExtensionError("Failed to download extension") + + self.logger.info("Unpack extension package") + pkg_file = os.path.join(conf.get_lib_dir(), + os.path.basename(uri.uri) + ".zip") + try: + fileutil.write_file(pkg_file, bytearray(package), asbin=True) + zipfile.ZipFile(pkg_file).extractall(self.get_base_dir()) + except IOError as e: + raise ExtensionError(u"Failed to write and unzip plugin", e) + + chmod = "find {0} -type f | xargs chmod u+x".format(self.get_base_dir()) + shellutil.run(chmod) + self.report_event(message="Download succeeded") + + self.logger.info("Initialize extension directory") + #Save HandlerManifest.json + man_file = fileutil.search_file(self.get_base_dir(), + 'HandlerManifest.json') + + if man_file is None: + raise ExtensionError("HandlerManifest.json not found") + + try: + man = fileutil.read_file(man_file, remove_bom=True) + fileutil.write_file(self.get_manifest_file(), man) + except IOError as e: + raise ExtensionError(u"Failed to save HandlerManifest.json", e) + + #Create status and config dir + try: + status_dir = self.get_status_dir() + fileutil.mkdir(status_dir, mode=0o700) + conf_dir = self.get_conf_dir() + fileutil.mkdir(conf_dir, mode=0o700) + except IOError as e: + raise ExtensionError(u"Failed to create status or config dir", e) + + #Save HandlerEnvironment.json + self.create_handler_env() + + def enable(self): + self.logger.info("Enable extension.") + self.set_operation(WALAEventOperation.Enable) + + man = self.load_manifest() + self.launch_command(man.get_enable_command(), timeout=300) + self.set_handler_state(ExtHandlerState.Enabled) + self.set_handler_status(status="Ready", message="Plugin enabled") + + def disable(self): + self.logger.info("Disable extension.") + self.set_operation(WALAEventOperation.Disable) + + man = self.load_manifest() + self.launch_command(man.get_disable_command(), timeout=900) + self.set_handler_state(ExtHandlerState.Installed) + self.set_handler_status(status="NotReady", message="Plugin disabled") + + def install(self): + self.logger.info("Install extension.") + 
self.set_operation(WALAEventOperation.Install) + + man = self.load_manifest() + self.launch_command(man.get_install_command(), timeout=900) + self.set_handler_state(ExtHandlerState.Installed) + + def uninstall(self): + self.logger.info("Uninstall extension.") + self.set_operation(WALAEventOperation.UnInstall) + + try: + man = self.load_manifest() + self.launch_command(man.get_uninstall_command()) + except ExtensionError as e: + self.report_event(message=ustr(e), is_success=False) + + def rm_ext_handler_dir(self): + try: + handler_state_dir = self.get_handler_state_dir() + if os.path.isdir(handler_state_dir): + self.logger.info("Remove ext handler dir: {0}", handler_state_dir) + shutil.rmtree(handler_state_dir) + base_dir = self.get_base_dir() + if os.path.isdir(base_dir): + self.logger.info("Remove ext handler dir: {0}", base_dir) + shutil.rmtree(base_dir) + except IOError as e: + message = "Failed to rm ext handler dir: {0}".format(e) + self.report_event(message=message, is_success=False) + + def update(self): + self.logger.info("Update extension.") + self.set_operation(WALAEventOperation.Update) + + man = self.load_manifest() + self.launch_command(man.get_update_command(), timeout=900) + + def update_with_install(self): + man = self.load_manifest() + if man.is_update_with_install(): + self.install() + else: + self.logger.info("UpdateWithInstall not set. " + "Skip install during upgrade.") + self.set_handler_state(ExtHandlerState.Installed) + + def get_largest_seq_no(self): + seq_no = -1 + conf_dir = self.get_conf_dir() + for item in os.listdir(conf_dir): + item_path = os.path.join(conf_dir, item) + if os.path.isfile(item_path): + try: + seperator = item.rfind(".") + if seperator > 0 and item[seperator + 1:] == 'settings': + curr_seq_no = int(item.split('.')[0]) + if curr_seq_no > seq_no: + seq_no = curr_seq_no + except Exception as e: + self.logger.verbose("Failed to parse file name: {0}", item) + continue + return seq_no + + def collect_ext_status(self, ext): + self.logger.verbose("Collect extension status") + + seq_no = self.get_largest_seq_no() + if seq_no == -1: + return None + + status_dir = self.get_status_dir() + ext_status_file = "{0}.status".format(seq_no) + ext_status_file = os.path.join(status_dir, ext_status_file) + + ext_status = ExtensionStatus(seq_no=seq_no) + try: + data_str = fileutil.read_file(ext_status_file) + data = json.loads(data_str) + parse_ext_status(ext_status, data) + except IOError as e: + ext_status.message = u"Failed to get status file {0}".format(e) + ext_status.code = -1 + ext_status.status = "error" + except ValueError as e: + ext_status.message = u"Malformed status file {0}".format(e) + ext_status.code = -1 + ext_status.status = "error" + + return ext_status + + def report_ext_status(self): + active_exts = [] + for ext in self.ext_handler.properties.extensions: + ext_status = self.collect_ext_status(ext) + if ext_status is None: + continue + try: + self.protocol.report_ext_status(self.ext_handler.name, ext.name, + ext_status) + active_exts.append(ext.name) + except ProtocolError as e: + self.logger.error(u"Failed to report extension status: {0}", e) + return active_exts + + def collect_heartbeat(self): + man = self.load_manifest() + if not man.is_report_heartbeat(): + return + heartbeat_file = os.path.join(conf.get_lib_dir(), + self.get_heartbeat_file()) + + self.logger.info("Collect heart beat") + if not os.path.isfile(heartbeat_file): + raise ExtensionError("Failed to get heart beat file") + if not self.is_responsive(heartbeat_file): + return { + 
"status": "Unresponsive", + "code": -1, + "message": "Extension heartbeat is not responsive" + } + try: + heartbeat_json = fileutil.read_file(heartbeat_file) + heartbeat = json.loads(heartbeat_json)[0]['heartbeat'] + except IOError as e: + raise ExtensionError("Failed to get heartbeat file:{0}".format(e)) + except ValueError as e: + raise ExtensionError("Malformed heartbeat file: {0}".format(e)) + return heartbeat + + def is_responsive(self, heartbeat_file): + last_update=int(time.time() - os.stat(heartbeat_file).st_mtime) + return last_update > 600 # not updated for more than 10 min + + def launch_command(self, cmd, timeout=300): + self.logger.info("Launch command:{0}", cmd) + base_dir = self.get_base_dir() + try: + devnull = open(os.devnull, 'w') + child = subprocess.Popen(base_dir + "/" + cmd, + shell=True, + cwd=base_dir, + stdout=devnull) + except Exception as e: + #TODO do not catch all exception + raise ExtensionError("Failed to launch: {0}, {1}".format(cmd, e)) + + retry = timeout + while retry > 0 and child.poll() is None: + time.sleep(1) + retry -= 1 + if retry == 0: + os.kill(child.pid, 9) + raise ExtensionError("Timeout({0}): {1}".format(timeout, cmd)) + + ret = child.wait() + if ret == None or ret != 0: + raise ExtensionError("Non-zero exit code: {0}, {1}".format(ret, cmd)) + + self.report_event(message="Launch command succeeded: {0}".format(cmd)) + + def load_manifest(self): + man_file = self.get_manifest_file() + try: + data = json.loads(fileutil.read_file(man_file)) + except IOError as e: + raise ExtensionError('Failed to load manifest file.') + except ValueError as e: + raise ExtensionError('Malformed manifest file.') + + return HandlerManifest(data[0]) + + def update_settings_file(self, settings_file, settings): + settings_file = os.path.join(self.get_conf_dir(), settings_file) + try: + fileutil.write_file(settings_file, settings) + except IOError as e: + raise ExtensionError(u"Failed to update settings file", e) + + def update_settings(self): + if self.ext_handler.properties.extensions is None or \ + len(self.ext_handler.properties.extensions) == 0: + #This is the behavior of waagent 2.0.x + #The new agent has to be consistent with the old one. 
+ self.logger.info("Extension has no settings, write empty 0.settings") + self.update_settings_file("0.settings", "") + return + + for ext in self.ext_handler.properties.extensions: + settings = { + 'publicSettings': ext.publicSettings, + 'protectedSettings': ext.protectedSettings, + 'protectedSettingsCertThumbprint': ext.certificateThumbprint + } + ext_settings = { + "runtimeSettings":[{ + "handlerSettings": settings + }] + } + settings_file = "{0}.settings".format(ext.sequenceNumber) + self.logger.info("Update settings file: {0}", settings_file) + self.update_settings_file(settings_file, json.dumps(ext_settings)) + + def create_handler_env(self): + env = [{ + "name": self.ext_handler.name, + "version" : HANDLER_ENVIRONMENT_VERSION, + "handlerEnvironment" : { + "logFolder" : self.get_log_dir(), + "configFolder" : self.get_conf_dir(), + "statusFolder" : self.get_status_dir(), + "heartbeatFile" : self.get_heartbeat_file() + } + }] + try: + fileutil.write_file(self.get_env_file(), json.dumps(env)) + except IOError as e: + raise ExtensionError(u"Failed to save handler environment", e) + + def get_handler_state_dir(self): + return os.path.join(conf.get_lib_dir(), "handler_state", + self.get_full_name()) + + def set_handler_state(self, handler_state): + state_dir = self.get_handler_state_dir() + if not os.path.exists(state_dir): + try: + fileutil.mkdir(state_dir, 0o700) + except IOError as e: + self.logger.error("Failed to create state dir: {0}", e) + + try: + state_file = os.path.join(state_dir, "state") + fileutil.write_file(state_file, handler_state) + except IOError as e: + self.logger.error("Failed to set state: {0}", e) + + def get_handler_state(self): + state_dir = self.get_handler_state_dir() + state_file = os.path.join(state_dir, "state") + if not os.path.isfile(state_file): + return ExtHandlerState.NotInstalled + + try: + return fileutil.read_file(state_file) + except IOError as e: + self.logger.error("Failed to get state: {0}", e) + return ExtHandlerState.NotInstalled + + def set_handler_status(self, status="NotReady", message="", + code=0): + state_dir = self.get_handler_state_dir() + if not os.path.exists(state_dir): + try: + fileutil.mkdir(state_dir, 0o700) + except IOError as e: + self.logger.error("Failed to create state dir: {0}", e) + + handler_status = ExtHandlerStatus() + handler_status.name = self.ext_handler.name + handler_status.version = self.ext_handler.properties.version + handler_status.message = message + handler_status.code = code + handler_status.status = status + status_file = os.path.join(state_dir, "status") + + try: + fileutil.write_file(status_file, + json.dumps(get_properties(handler_status))) + except (IOError, ValueError, ProtocolError) as e: + self.logger.error("Failed to save handler status: {0}", e) + + def get_handler_status(self): + state_dir = self.get_handler_state_dir() + status_file = os.path.join(state_dir, "status") + if not os.path.isfile(status_file): + return None + + try: + data = json.loads(fileutil.read_file(status_file)) + handler_status = ExtHandlerStatus() + set_properties("ExtHandlerStatus", handler_status, data) + return handler_status + except (IOError, ValueError) as e: + self.logger.error("Failed to get handler status: {0}", e) + + def get_full_name(self): + return "{0}-{1}".format(self.ext_handler.name, + self.ext_handler.properties.version) + + def get_base_dir(self): + return os.path.join(conf.get_lib_dir(), self.get_full_name()) + + def get_status_dir(self): + return os.path.join(self.get_base_dir(), "status") + + def 
get_conf_dir(self): + return os.path.join(self.get_base_dir(), 'config') + + def get_heartbeat_file(self): + return os.path.join(self.get_base_dir(), 'heartbeat.log') + + def get_manifest_file(self): + return os.path.join(self.get_base_dir(), 'HandlerManifest.json') + + def get_env_file(self): + return os.path.join(self.get_base_dir(), 'HandlerEnvironment.json') + + def get_log_dir(self): + return os.path.join(conf.get_ext_log_dir(), self.ext_handler.name, + self.ext_handler.properties.version) + +class HandlerEnvironment(object): + def __init__(self, data): + self.data = data + + def get_version(self): + return self.data["version"] + + def get_log_dir(self): + return self.data["handlerEnvironment"]["logFolder"] + + def get_conf_dir(self): + return self.data["handlerEnvironment"]["configFolder"] + + def get_status_dir(self): + return self.data["handlerEnvironment"]["statusFolder"] + + def get_heartbeat_file(self): + return self.data["handlerEnvironment"]["heartbeatFile"] + +class HandlerManifest(object): + def __init__(self, data): + if data is None or data['handlerManifest'] is None: + raise ExtensionError('Malformed manifest file.') + self.data = data + + def get_name(self): + return self.data["name"] + + def get_version(self): + return self.data["version"] + + def get_install_command(self): + return self.data['handlerManifest']["installCommand"] + + def get_uninstall_command(self): + return self.data['handlerManifest']["uninstallCommand"] + + def get_update_command(self): + return self.data['handlerManifest']["updateCommand"] + + def get_enable_command(self): + return self.data['handlerManifest']["enableCommand"] + + def get_disable_command(self): + return self.data['handlerManifest']["disableCommand"] + + def is_reboot_after_install(self): + """ + Deprecated + """ + return False + + def is_report_heartbeat(self): + return self.data['handlerManifest'].get('reportHeartbeat', False) + + def is_update_with_install(self): + update_mode = self.data['handlerManifest'].get('updateMode') + if update_mode is None: + return True + return update_mode.lower() == "updatewithinstall" diff --git a/azurelinuxagent/ga/monitor.py b/azurelinuxagent/ga/monitor.py new file mode 100644 index 0000000..f49cef8 --- /dev/null +++ b/azurelinuxagent/ga/monitor.py @@ -0,0 +1,192 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import datetime +import json +import os +import platform +import time +import threading + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger + +from azurelinuxagent.common.event import WALAEventOperation, add_event +from azurelinuxagent.common.exception import EventError, ProtocolError, OSUtilError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.common.protocol.restapi import TelemetryEventParam, \ + TelemetryEventList, \ + TelemetryEvent, \ + set_properties +from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, getattrib +from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ + DISTRO_CODE_NAME, AGENT_LONG_VERSION, \ + CURRENT_AGENT, CURRENT_VERSION + + +def parse_event(data_str): + try: + return parse_json_event(data_str) + except ValueError: + return parse_xml_event(data_str) + + +def parse_xml_param(param_node): + name = getattrib(param_node, "Name") + value_str = getattrib(param_node, "Value") + attr_type = getattrib(param_node, "T") + value = value_str + if attr_type == 'mt:uint64': + value = int(value_str) + elif attr_type == 'mt:bool': + value = bool(value_str) + elif attr_type == 'mt:float64': + value = float(value_str) + return TelemetryEventParam(name, value) + + +def parse_xml_event(data_str): + try: + xml_doc = parse_doc(data_str) + event_id = getattrib(find(xml_doc, "Event"), 'id') + provider_id = getattrib(find(xml_doc, "Provider"), 'id') + event = TelemetryEvent(event_id, provider_id) + param_nodes = findall(xml_doc, 'Param') + for param_node in param_nodes: + event.parameters.append(parse_xml_param(param_node)) + return event + except Exception as e: + raise ValueError(ustr(e)) + + +def parse_json_event(data_str): + data = json.loads(data_str) + event = TelemetryEvent() + set_properties("TelemetryEvent", event, data) + return event + + +def get_monitor_handler(): + return MonitorHandler() + + +class MonitorHandler(object): + def __init__(self): + self.osutil = get_osutil() + self.protocol_util = get_protocol_util() + self.sysinfo = [] + + def run(self): + self.init_sysinfo() + + event_thread = threading.Thread(target=self.daemon) + event_thread.setDaemon(True) + event_thread.start() + + def init_sysinfo(self): + osversion = "{0}:{1}-{2}-{3}:{4}".format(platform.system(), + DISTRO_NAME, + DISTRO_VERSION, + DISTRO_CODE_NAME, + platform.release()) + self.sysinfo.append(TelemetryEventParam("OSVersion", osversion)) + self.sysinfo.append( + TelemetryEventParam("GAVersion", CURRENT_AGENT)) + + try: + ram = self.osutil.get_total_mem() + processors = self.osutil.get_processor_cores() + self.sysinfo.append(TelemetryEventParam("RAM", ram)) + self.sysinfo.append(TelemetryEventParam("Processors", processors)) + except OSUtilError as e: + logger.warn("Failed to get system info: {0}", e) + + try: + protocol = self.protocol_util.get_protocol() + vminfo = protocol.get_vminfo() + self.sysinfo.append(TelemetryEventParam("VMName", + vminfo.vmName)) + self.sysinfo.append(TelemetryEventParam("TenantName", + vminfo.tenantName)) + self.sysinfo.append(TelemetryEventParam("RoleName", + vminfo.roleName)) + self.sysinfo.append(TelemetryEventParam("RoleInstanceName", + vminfo.roleInstanceName)) + self.sysinfo.append(TelemetryEventParam("ContainerId", + vminfo.containerId)) + except ProtocolError as e: + logger.warn("Failed to get 
system info: {0}", e) + + def collect_event(self, evt_file_name): + try: + logger.verbose("Found event file: {0}", evt_file_name) + with open(evt_file_name, "rb") as evt_file: + # if fail to open or delete the file, throw exception + data_str = evt_file.read().decode("utf-8", 'ignore') + logger.verbose("Processed event file: {0}", evt_file_name) + os.remove(evt_file_name) + return data_str + except IOError as e: + msg = "Failed to process {0}, {1}".format(evt_file_name, e) + raise EventError(msg) + + def collect_and_send_events(self): + event_list = TelemetryEventList() + event_dir = os.path.join(conf.get_lib_dir(), "events") + event_files = os.listdir(event_dir) + for event_file in event_files: + if not event_file.endswith(".tld"): + continue + event_file_path = os.path.join(event_dir, event_file) + try: + data_str = self.collect_event(event_file_path) + except EventError as e: + logger.error("{0}", e) + continue + + try: + event = parse_event(data_str) + event.parameters.extend(self.sysinfo) + event_list.events.append(event) + except (ValueError, ProtocolError) as e: + logger.warn("Failed to decode event file: {0}", e) + continue + + if len(event_list.events) == 0: + return + + try: + protocol = self.protocol_util.get_protocol() + protocol.report_event(event_list) + except ProtocolError as e: + logger.error("{0}", e) + + def daemon(self): + last_heartbeat = datetime.datetime.min + period = datetime.timedelta(minutes=30) + while True: + if (datetime.datetime.now() - last_heartbeat) > period: + last_heartbeat = datetime.datetime.now() + add_event(op=WALAEventOperation.HeartBeat, name="WALA", + is_success=True) + try: + self.collect_and_send_events() + except Exception as e: + logger.warn("Failed to send events: {0}", e) + time.sleep(60) diff --git a/azurelinuxagent/ga/update.py b/azurelinuxagent/ga/update.py new file mode 100644 index 0000000..e89608a --- /dev/null +++ b/azurelinuxagent/ga/update.py @@ -0,0 +1,715 @@ +# Windows Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# +import glob +import json +import os +import platform +import re +import shlex +import shutil +import signal +import subprocess +import sys +import time +import zipfile + +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.restutil as restutil +import azurelinuxagent.common.utils.textutil as textutil + +from azurelinuxagent.common.event import add_event, WALAEventOperation +from azurelinuxagent.common.exception import UpdateError, ProtocolError +from azurelinuxagent.common.future import ustr +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.protocol import get_protocol_util +from azurelinuxagent.common.utils.flexible_version import FlexibleVersion +from azurelinuxagent.common.version import AGENT_NAME, AGENT_VERSION, AGENT_LONG_VERSION, \ + AGENT_DIR_GLOB, AGENT_PKG_GLOB, \ + AGENT_PATTERN, AGENT_NAME_PATTERN, AGENT_DIR_PATTERN, \ + CURRENT_AGENT, CURRENT_VERSION, \ + is_current_agent_installed + +from azurelinuxagent.ga.exthandlers import HandlerManifest + + +AGENT_ERROR_FILE = "error.json" # File name for agent error record +AGENT_MANIFEST_FILE = "HandlerManifest.json" + +CHILD_HEALTH_INTERVAL = 15 * 60 +CHILD_LAUNCH_INTERVAL = 5 * 60 +CHILD_LAUNCH_RESTART_MAX = 3 +CHILD_POLL_INTERVAL = 60 + +MAX_FAILURE = 3 # Max failure allowed for agent before blacklisted + +GOAL_STATE_INTERVAL = 25 +REPORT_STATUS_INTERVAL = 15 +RETAIN_INTERVAL = 24 * 60 * 60 # Retain interval for black list + + +def get_update_handler(): + return UpdateHandler() + + +def get_python_cmd(): + major_version = platform.python_version_tuple()[0] + return "python" if int(major_version) <= 2 else "python{0}".format(major_version) + + +class UpdateHandler(object): + + def __init__(self): + self.osutil = get_osutil() + self.protocol_util = get_protocol_util() + + self.running = True + self.last_etag = None + self.last_attempt_time = None + + self.agents = [] + + self.child_agent = None + self.child_launch_time = None + self.child_launch_attempts = 0 + self.child_process = None + + self.signal_handler = None + return + + def run_latest(self): + """ + This method is called from the daemon to find and launch the most + current, downloaded agent. 
+ + Note: + - Most events should be tagged to the launched agent (agent_version) + """ + + if self.child_process is not None: + raise Exception("Illegal attempt to launch multiple goal state Agent processes") + + if self.signal_handler is None: + self.signal_handler = signal.signal(signal.SIGTERM, self.forward_signal) + + latest_agent = self.get_latest_agent() + if latest_agent is None: + logger.info(u"Installed Agent {0} is the most current agent", CURRENT_AGENT) + agent_cmd = "python -u {0} -run-exthandlers".format(sys.argv[0]) + agent_dir = os.getcwd() + agent_name = CURRENT_AGENT + agent_version = CURRENT_VERSION + else: + logger.info(u"Determined Agent {0} to be the latest agent", latest_agent.name) + agent_cmd = latest_agent.get_agent_cmd() + agent_dir = latest_agent.get_agent_dir() + agent_name = latest_agent.name + agent_version = latest_agent.version + + try: + + # Launch the correct Python version for python-based agents + cmds = shlex.split(agent_cmd) + if cmds[0].lower() == "python": + cmds[0] = get_python_cmd() + agent_cmd = " ".join(cmds) + + self._evaluate_agent_health(latest_agent) + + self.child_process = subprocess.Popen( + cmds, + cwd=agent_dir, + stdout=sys.stdout, + stderr=sys.stderr) + + logger.info(u"Agent {0} launched with command '{1}'", agent_name, agent_cmd) + + ret = None + start_time = time.time() + while (time.time() - start_time) < CHILD_HEALTH_INTERVAL: + time.sleep(CHILD_POLL_INTERVAL) + ret = self.child_process.poll() + if ret is not None: + break + + if ret is None or ret <= 0: + msg = u"Agent {0} launched with command '{1}' is successfully running".format( + agent_name, + agent_cmd) + logger.info(msg) + add_event( + AGENT_NAME, + version=agent_version, + op=WALAEventOperation.Enable, + is_success=True, + message=msg) + + if ret is None: + ret = self.child_process.wait() + + else: + msg = u"Agent {0} launched with command '{1}' failed with return code: {2}".format( + agent_name, + agent_cmd, + ret) + logger.warn(msg) + add_event( + AGENT_NAME, + version=agent_version, + op=WALAEventOperation.Enable, + is_success=False, + message=msg) + + if ret is not None and ret > 0: + msg = u"Agent {0} launched with command '{1}' returned code: {2}".format( + agent_name, + agent_cmd, + ret) + logger.warn(msg) + if latest_agent is not None: + latest_agent.mark_failure() + + except Exception as e: + msg = u"Agent {0} launched with command '{1}' failed with exception: {2}".format( + agent_name, + agent_cmd, + ustr(e)) + logger.warn(msg) + add_event( + AGENT_NAME, + version=agent_version, + op=WALAEventOperation.Enable, + is_success=False, + message=msg) + if latest_agent is not None: + latest_agent.mark_failure(is_fatal=True) + + self.child_process = None + return + + def run(self): + """ + This is the main loop which watches for agent and extension updates. 
+ """ + + logger.info(u"Agent {0} is running as the goal state agent", CURRENT_AGENT) + + # Launch monitoring threads + from azurelinuxagent.ga.monitor import get_monitor_handler + get_monitor_handler().run() + + from azurelinuxagent.ga.env import get_env_handler + get_env_handler().run() + + from azurelinuxagent.ga.exthandlers import get_exthandlers_handler + exthandlers_handler = get_exthandlers_handler() + + # TODO: Add means to stop running + try: + while self.running: + if self._ensure_latest_agent(): + if len(self.agents) > 0: + logger.info( + u"Agent {0} discovered {1} as an update and will exit", + CURRENT_AGENT, + self.agents[0].name) + break + + exthandlers_handler.run() + + time.sleep(25) + + except Exception as e: + logger.warn(u"Agent {0} failed with exception: {1}", CURRENT_AGENT, ustr(e)) + sys.exit(1) + + sys.exit(0) + return + + def forward_signal(self, signum, frame): + if self.child_process is None: + return + + logger.info( + u"Agent {0} forwarding signal {1} to {2}", + CURRENT_AGENT, + signum, + self.child_agent.name if self.child_agent is not None else CURRENT_AGENT) + self.child_process.send_signal(signum) + + if self.signal_handler not in (None, signal.SIG_IGN, signal.SIG_DFL): + self.signal_handler(signum, frame) + elif self.signal_handler is signal.SIG_DFL: + if signum == signal.SIGTERM: + sys.exit(0) + return + + def get_latest_agent(self): + """ + If autoupdate is enabled, return the most current, downloaded, + non-blacklisted agent (if any). + Otherwise, return None (implying to use the installed agent). + """ + + if not conf.get_autoupdate_enabled(): + return None + + self._load_agents() + available_agents = [agent for agent in self.agents if agent.is_available] + return available_agents[0] if len(available_agents) >= 1 else None + + def _ensure_latest_agent(self, base_version=CURRENT_VERSION): + # Ignore new agents if updating is disabled + if not conf.get_autoupdate_enabled(): + return False + + now = time.time() + if self.last_attempt_time is not None: + next_attempt_time = self.last_attempt_time + conf.get_autoupdate_frequency() + else: + next_attempt_time = now + if next_attempt_time > now: + return False + + family = conf.get_autoupdate_gafamily() + logger.info("Checking for agent family {0} updates", family) + + self.last_attempt_time = now + try: + protocol = self.protocol_util.get_protocol() + manifest_list, etag = protocol.get_vmagent_manifests() + except Exception as e: + msg = u"Exception retrieving agent manifests: {0}".format(ustr(e)) + logger.warn(msg) + add_event( + AGENT_NAME, + op=WALAEventOperation.Download, + version=CURRENT_VERSION, + is_success=False, + message=msg) + return False + + if self.last_etag is not None and self.last_etag == etag: + logger.info(u"Incarnation {0} has no agent updates", etag) + return False + + manifests = [m for m in manifest_list.vmAgentManifests if m.family == family] + if len(manifests) == 0: + logger.info(u"Incarnation {0} has no agent family {1} updates", etag, family) + return False + + try: + pkg_list = protocol.get_vmagent_pkgs(manifests[0]) + except ProtocolError as e: + msg= u"Incarnation {0} failed to get {1} package list: {2}".format( + etag, + family, + ustr(e)) + logger.warn(msg) + add_event( + AGENT_NAME, + op=WALAEventOperation.Download, + version=CURRENT_VERSION, + is_success=False, + message=msg) + return False + + # Set the agents to those available for download at least as current as the existing agent + # and remove from disk any agent no longer reported to the VM. 
+ # Note: + # The code leaves on disk available, but blacklisted, agents so as to preserve the state. + # Otherwise, those agents could be again downloaded and inappropriately retried. + self._set_agents([GuestAgent(pkg=pkg) for pkg in pkg_list.versions]) + self._purge_agents() + self._filter_blacklisted_agents() + + # Return True if agents more recent than the current are available + return len(self.agents) > 0 and self.agents[0].version > base_version + + def _evaluate_agent_health(self, latest_agent): + """ + Evaluate the health of the selected agent: If it is restarting + too frequently, raise an Exception to force blacklisting. + """ + if latest_agent is None: + self.child_agent = None + return + + if self.child_agent is None or latest_agent.version != self.child_agent.version: + self.child_agent = latest_agent + self.child_launch_time = None + self.child_launch_attempts = 0 + + if self.child_launch_time is None: + self.child_launch_time = time.time() + + self.child_launch_attempts += 1 + + if (time.time() - self.child_launch_time) <= CHILD_LAUNCH_INTERVAL \ + and self.child_launch_attempts >= CHILD_LAUNCH_RESTART_MAX: + msg = u"Agent {0} restarted more than {1} times in {2} seconds".format( + self.child_agent.name, + CHILD_LAUNCH_RESTART_MAX, + CHILD_LAUNCH_INTERVAL) + raise Exception(msg) + return + + def _filter_blacklisted_agents(self): + self.agents = [agent for agent in self.agents if not agent.is_blacklisted] + return + + def _load_agents(self): + """ + Load all non-blacklisted agents currently on disk. + """ + if len(self.agents) <= 0: + try: + path = os.path.join(conf.get_lib_dir(), "{0}-*".format(AGENT_NAME)) + self._set_agents([GuestAgent(path=agent_dir) + for agent_dir in glob.iglob(path) if os.path.isdir(agent_dir)]) + self._filter_blacklisted_agents() + except Exception as e: + logger.warn(u"Exception occurred loading available agents: {0}", ustr(e)) + return + + def _purge_agents(self): + """ + Remove from disk all directories and .zip files of unknown agents + (without removing the current, running agent). 
+ """ + path = os.path.join(conf.get_lib_dir(), "{0}-*".format(AGENT_NAME)) + + known_versions = [agent.version for agent in self.agents] + if not is_current_agent_installed() and CURRENT_VERSION not in known_versions: + logger.warn( + u"Running Agent {0} was not found in the agent manifest - adding to list", + CURRENT_VERSION) + known_versions.append(CURRENT_VERSION) + + for agent_path in glob.iglob(path): + try: + name = fileutil.trim_ext(agent_path, "zip") + m = AGENT_DIR_PATTERN.match(name) + if m is not None and FlexibleVersion(m.group(1)) not in known_versions: + if os.path.isfile(agent_path): + logger.info(u"Purging outdated Agent file {0}", agent_path) + os.remove(agent_path) + else: + logger.info(u"Purging outdated Agent directory {0}", agent_path) + shutil.rmtree(agent_path) + except Exception as e: + logger.warn(u"Purging {0} raised exception: {1}", agent_path, ustr(e)) + return + + def _set_agents(self, agents=[]): + self.agents = agents + self.agents.sort(key=lambda agent: agent.version, reverse=True) + return + + +class GuestAgent(object): + def __init__(self, path=None, pkg=None): + self.pkg = pkg + version = None + if path is not None: + m = AGENT_DIR_PATTERN.match(path) + if m == None: + raise UpdateError(u"Illegal agent directory: {0}".format(path)) + version = m.group(1) + elif self.pkg is not None: + version = pkg.version + + if version == None: + raise UpdateError(u"Illegal agent version: {0}".format(version)) + self.version = FlexibleVersion(version) + + location = u"disk" if path is not None else u"package" + logger.info(u"Instantiating Agent {0} from {1}", self.name, location) + + self.error = None + self._load_error() + self._ensure_downloaded() + return + + @property + def name(self): + return "{0}-{1}".format(AGENT_NAME, self.version) + + def get_agent_cmd(self): + return self.manifest.get_enable_command() + + def get_agent_dir(self): + return os.path.join(conf.get_lib_dir(), self.name) + + def get_agent_error_file(self): + return os.path.join(conf.get_lib_dir(), self.name, AGENT_ERROR_FILE) + + def get_agent_manifest_path(self): + return os.path.join(self.get_agent_dir(), AGENT_MANIFEST_FILE) + + def get_agent_pkg_path(self): + return ".".join((os.path.join(conf.get_lib_dir(), self.name), "zip")) + + def clear_error(self): + self.error.clear() + return + + @property + def is_available(self): + return self.is_downloaded and not self.is_blacklisted + + @property + def is_blacklisted(self): + return self.error is not None and self.error.is_blacklisted + + @property + def is_downloaded(self): + return self.is_blacklisted or os.path.isfile(self.get_agent_manifest_path()) + + def mark_failure(self, is_fatal=False): + try: + if not os.path.isdir(self.get_agent_dir()): + os.makedirs(self.get_agent_dir()) + self.error.mark_failure(is_fatal=is_fatal) + self.error.save() + if is_fatal: + logger.warn(u"Agent {0} is permanently blacklisted", self.name) + except Exception as e: + logger.warn(u"Agent {0} failed recording error state: {1}", self.name, ustr(e)) + return + + def _ensure_downloaded(self): + try: + logger.info(u"Ensuring Agent {0} is downloaded", self.name) + + if self.is_blacklisted: + logger.info(u"Agent {0} is blacklisted - skipping download", self.name) + return + + if self.is_downloaded: + logger.info(u"Agent {0} was previously downloaded - skipping download", self.name) + self._load_manifest() + return + + if self.pkg is None: + raise UpdateError(u"Agent {0} is missing package and download URIs".format( + self.name)) + + self._download() + self._unpack() + 
self._load_manifest() + self._load_error() + + msg = u"Agent {0} downloaded successfully".format(self.name) + logger.info(msg) + add_event( + AGENT_NAME, + version=self.version, + op=WALAEventOperation.Install, + is_success=True, + message=msg) + + except Exception as e: + # Note the failure, blacklist the agent if the package downloaded + # - An exception with a downloaded package indicates the package + # is corrupt (e.g., missing the HandlerManifest.json file) + self.mark_failure(is_fatal=os.path.isfile(self.get_agent_pkg_path())) + + msg = u"Agent {0} download failed with exception: {1}".format(self.name, ustr(e)) + logger.warn(msg) + add_event( + AGENT_NAME, + version=self.version, + op=WALAEventOperation.Install, + is_success=False, + message=msg) + return + + def _download(self): + package = None + + for uri in self.pkg.uris: + try: + resp = restutil.http_get(uri.uri, chk_proxy=True) + if resp.status == restutil.httpclient.OK: + package = resp.read() + fileutil.write_file(self.get_agent_pkg_path(), bytearray(package), asbin=True) + logger.info(u"Agent {0} downloaded from {1}", self.name, uri.uri) + break + except restutil.HttpError as e: + logger.warn(u"Agent {0} download from {1} failed", self.name, uri.uri) + + if not os.path.isfile(self.get_agent_pkg_path()): + msg = u"Unable to download Agent {0} from any URI".format(self.name) + add_event( + AGENT_NAME, + op=WALAEventOperation.Download, + version=CURRENT_VERSION, + is_success=False, + message=msg) + raise UpdateError(msg) + return + + def _load_error(self): + try: + if self.error is None: + self.error = GuestAgentError(self.get_agent_error_file()) + self.error.load() + logger.info(u"Agent {0} error state: {1}", self.name, ustr(self.error)) + except Exception as e: + logger.warn(u"Agent {0} failed loading error state: {1}", self.name, ustr(e)) + return + + def _load_manifest(self): + path = self.get_agent_manifest_path() + if not os.path.isfile(path): + msg = u"Agent {0} is missing the {1} file".format(self.name, AGENT_MANIFEST_FILE) + raise UpdateError(msg) + + with open(path, "r") as manifest_file: + try: + manifests = json.load(manifest_file) + except Exception as e: + msg = u"Agent {0} has a malformed {1}".format(self.name, AGENT_MANIFEST_FILE) + raise UpdateError(msg) + if type(manifests) is list: + if len(manifests) <= 0: + msg = u"Agent {0} has an empty {1}".format(self.name, AGENT_MANIFEST_FILE) + raise UpdateError(msg) + manifest = manifests[0] + else: + manifest = manifests + + try: + self.manifest = HandlerManifest(manifest) + if len(self.manifest.get_enable_command()) <= 0: + raise Exception(u"Manifest is missing the enable command") + except Exception as e: + msg = u"Agent {0} has an illegal {1}: {2}".format( + self.name, + AGENT_MANIFEST_FILE, + ustr(e)) + raise UpdateError(msg) + + logger.info( + u"Agent {0} loaded manifest from {1}", + self.name, + self.get_agent_manifest_path()) + logger.verbose(u"Successfully loaded Agent {0} {1}: {2}", + self.name, + AGENT_MANIFEST_FILE, + ustr(self.manifest.data)) + return + + def _unpack(self): + try: + if os.path.isdir(self.get_agent_dir()): + shutil.rmtree(self.get_agent_dir()) + + zipfile.ZipFile(self.get_agent_pkg_path()).extractall(self.get_agent_dir()) + + except Exception as e: + msg = u"Exception unpacking Agent {0} from {1}: {2}".format( + self.name, + self.get_agent_pkg_path(), + ustr(e)) + raise UpdateError(msg) + + if not os.path.isdir(self.get_agent_dir()): + msg = u"Unpacking Agent {0} failed to create directory {1}".format( + self.name, + self.get_agent_dir()) 
+ raise UpdateError(msg) + + logger.info( + u"Agent {0} unpacked successfully to {1}", + self.name, + self.get_agent_dir()) + return + + +class GuestAgentError(object): + def __init__(self, path): + if path is None: + raise UpdateError(u"GuestAgentError requires a path") + self.path = path + + self.clear() + self.load() + return + + def mark_failure(self, is_fatal=False): + self.last_failure = time.time() + self.failure_count += 1 + self.was_fatal = is_fatal + return + + def clear(self): + self.last_failure = 0.0 + self.failure_count = 0 + self.was_fatal = False + return + + def clear_old_failure(self): + if self.last_failure <= 0.0: + return + if self.last_failure < (time.time() - RETAIN_INTERVAL): + self.clear() + return + + @property + def is_blacklisted(self): + return self.was_fatal or self.failure_count >= MAX_FAILURE + + def load(self): + if self.path is not None and os.path.isfile(self.path): + with open(self.path, 'r') as f: + self.from_json(json.load(f)) + return + + def save(self): + if os.path.isdir(os.path.dirname(self.path)): + with open(self.path, 'w') as f: + json.dump(self.to_json(), f) + return + + def from_json(self, data): + self.last_failure = max( + self.last_failure, + data.get(u"last_failure", 0.0)) + self.failure_count = max( + self.failure_count, + data.get(u"failure_count", 0)) + self.was_fatal = self.was_fatal or data.get(u"was_fatal", False) + return + + def to_json(self): + data = { + u"last_failure": self.last_failure, + u"failure_count": self.failure_count, + u"was_fatal" : self.was_fatal + } + return data + + def __str__(self): + return "Last Failure: {0}, Total Failures: {1}, Fatal: {2}".format( + self.last_failure, + self.failure_count, + self.was_fatal) diff --git a/azurelinuxagent/logger.py b/azurelinuxagent/logger.py deleted file mode 100644 index 6c6b406..0000000 --- a/azurelinuxagent/logger.py +++ /dev/null @@ -1,161 +0,0 @@ -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and openssl_bin 1.0+ -# -# Implements parts of RFC 2131, 1541, 1497 and -# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx -# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx -""" -Log utils -""" -import os -import sys -from azurelinuxagent.future import ustr -from datetime import datetime - -class Logger(object): - """ - Logger class - """ - def __init__(self, logger=None, prefix=None): - self.appenders = [] - if logger is not None: - self.appenders.extend(logger.appenders) - self.prefix = prefix - - def verb(self, msg_format, *args): - self.log(LogLevel.VERBOSE, msg_format, *args) - - def info(self, msg_format, *args): - self.log(LogLevel.INFO, msg_format, *args) - - def warn(self, msg_format, *args): - self.log(LogLevel.WARNING, msg_format, *args) - - def error(self, msg_format, *args): - self.log(LogLevel.ERROR, msg_format, *args) - - def log(self, level, msg_format, *args): - #if msg_format is not unicode convert it to unicode - if type(msg_format) is not ustr: - msg_format = ustr(msg_format, errors="backslashreplace") - if len(args) > 0: - msg = msg_format.format(*args) - else: - msg = msg_format - time = datetime.now().strftime(u'%Y/%m/%d %H:%M:%S.%f') - level_str = LogLevel.STRINGS[level] - if self.prefix is not None: - log_item = u"{0} {1} {2} {3}\n".format(time, level_str, self.prefix, - msg) - else: - log_item = u"{0} {1} {2}\n".format(time, level_str, msg) - - log_item = ustr(log_item.encode('ascii', "backslashreplace"), - encoding="ascii") - for appender in self.appenders: - appender.write(level, log_item) - - def add_appender(self, appender_type, level, path): - appender = _create_logger_appender(appender_type, level, path) - self.appenders.append(appender) - -class ConsoleAppender(object): - def __init__(self, level, path): - self.level = LogLevel.INFO - if level >= LogLevel.INFO: - self.level = level - self.path = path - - def write(self, level, msg): - if self.level <= level: - try: - with open(self.path, "w") as console: - console.write(msg) - except IOError: - pass - -class FileAppender(object): - def __init__(self, level, path): - self.level = level - self.path = path - - def write(self, level, msg): - if self.level <= level: - try: - with open(self.path, "a+") as log_file: - log_file.write(msg) - except IOError: - pass - -class StdoutAppender(object): - def __init__(self, level): - self.level = level - - def write(self, level, msg): - if self.level <= level: - try: - sys.stdout.write(msg) - except IOError: - pass - -#Initialize logger instance -DEFAULT_LOGGER = Logger() - -class LogLevel(object): - VERBOSE = 0 - INFO = 1 - WARNING = 2 - ERROR = 3 - STRINGS = [ - "VERBOSE", - "INFO", - "WARNING", - "ERROR" - ] - -class AppenderType(object): - FILE = 0 - CONSOLE = 1 - STDOUT = 2 - -def add_logger_appender(appender_type, level=LogLevel.INFO, path=None): - DEFAULT_LOGGER.add_appender(appender_type, level, path) - -def verb(msg_format, *args): - DEFAULT_LOGGER.verb(msg_format, *args) - -def info(msg_format, *args): - DEFAULT_LOGGER.info(msg_format, *args) - -def warn(msg_format, *args): - DEFAULT_LOGGER.warn(msg_format, *args) - -def error(msg_format, *args): - DEFAULT_LOGGER.error(msg_format, *args) - -def log(level, msg_format, *args): - DEFAULT_LOGGER.log(level, msg_format, args) - -def _create_logger_appender(appender_type, level=LogLevel.INFO, path=None): - if appender_type == AppenderType.CONSOLE: - return ConsoleAppender(level, path) - elif appender_type == AppenderType.FILE: - return FileAppender(level, 
path) - elif appender_type == AppenderType.STDOUT: - return StdoutAppender(level) - else: - raise ValueError("Unknown appender type") - diff --git a/azurelinuxagent/metadata.py b/azurelinuxagent/metadata.py deleted file mode 100644 index 34fdcf9..0000000 --- a/azurelinuxagent/metadata.py +++ /dev/null @@ -1,82 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import os -import re -import platform -import sys -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.future import ustr - -def get_distro(): - if 'FreeBSD' in platform.system(): - release = re.sub('\-.*\Z', '', ustr(platform.release())) - osinfo = ['freebsd', release, '', 'freebsd'] - if 'linux_distribution' in dir(platform): - osinfo = list(platform.linux_distribution(full_distribution_name=0)) - full_name = platform.linux_distribution()[0].strip() - osinfo.append(full_name) - else: - osinfo = platform.dist() - - #The platform.py lib has issue with detecting oracle linux distribution. - #Merge the following patch provided by oracle as a temparory fix. - if os.path.exists("/etc/oracle-release"): - osinfo[2] = "oracle" - osinfo[3] = "Oracle Linux" - - #Remove trailing whitespace and quote in distro name - osinfo[0] = osinfo[0].strip('"').strip(' ').lower() - return osinfo - -AGENT_NAME = "WALinuxAgent" -AGENT_LONG_NAME = "Azure Linux Agent" -AGENT_VERSION = '2.1.3' -AGENT_LONG_VERSION = "{0}-{1}".format(AGENT_NAME, AGENT_VERSION) -AGENT_DESCRIPTION = """\ -The Azure Linux Agent supports the provisioning and running of Linux -VMs in the Azure cloud. This package should be installed on Linux disk -images that are built to run in the Azure environment. -""" - -__distro__ = get_distro() -DISTRO_NAME = __distro__[0] -DISTRO_VERSION = __distro__[1] -DISTRO_CODE_NAME = __distro__[2] -DISTRO_FULL_NAME = __distro__[3] - -PY_VERSION = sys.version_info -PY_VERSION_MAJOR = sys.version_info[0] -PY_VERSION_MINOR = sys.version_info[1] -PY_VERSION_MICRO = sys.version_info[2] - - -""" -Add this walk arround for detecting Snappy Ubuntu Core temporarily, until ubuntu -fixed this bug: https://bugs.launchpad.net/snappy/+bug/1481086 -""" -def is_snappy(): - if os.path.exists("/etc/motd"): - motd = fileutil.read_file("/etc/motd") - if "snappy" in motd: - return True - return False - -if is_snappy(): - DISTRO_FULL_NAME = "Snappy Ubuntu Core" diff --git a/azurelinuxagent/pa/__init__.py b/azurelinuxagent/pa/__init__.py new file mode 100644 index 0000000..1ea2f38 --- /dev/null +++ b/azurelinuxagent/pa/__init__.py @@ -0,0 +1,17 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + diff --git a/azurelinuxagent/pa/deprovision/__init__.py b/azurelinuxagent/pa/deprovision/__init__.py new file mode 100644 index 0000000..de77168 --- /dev/null +++ b/azurelinuxagent/pa/deprovision/__init__.py @@ -0,0 +1,20 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.pa.deprovision.factory import get_deprovision_handler + +__all__ = ["get_deprovision_handler"] diff --git a/azurelinuxagent/pa/deprovision/coreos.py b/azurelinuxagent/pa/deprovision/coreos.py new file mode 100644 index 0000000..079a913 --- /dev/null +++ b/azurelinuxagent/pa/deprovision/coreos.py @@ -0,0 +1,34 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ + DeprovisionAction + +class CoreOSDeprovisionHandler(DeprovisionHandler): + def __init__(self): + super(CoreOSDeprovisionHandler, self).__init__() + + def setup(self, deluser): + warnings, actions = super(CoreOSDeprovisionHandler, self).setup(deluser) + warnings.append("WARNING! /etc/machine-id will be removed.") + files_to_del = ['/etc/machine-id'] + actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) + return warnings, actions + diff --git a/azurelinuxagent/pa/deprovision/default.py b/azurelinuxagent/pa/deprovision/default.py new file mode 100644 index 0000000..b570c31 --- /dev/null +++ b/azurelinuxagent/pa/deprovision/default.py @@ -0,0 +1,131 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.exception import ProtocolError +from azurelinuxagent.common.future import read_input +import azurelinuxagent.common.utils.fileutil as fileutil +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.protocol import get_protocol_util + +class DeprovisionAction(object): + def __init__(self, func, args=[], kwargs={}): + self.func = func + self.args = args + self.kwargs = kwargs + + def invoke(self): + self.func(*self.args, **self.kwargs) + +class DeprovisionHandler(object): + def __init__(self): + self.osutil = get_osutil() + self.protocol_util = get_protocol_util() + + def del_root_password(self, warnings, actions): + warnings.append("WARNING! root password will be disabled. " + "You will not be able to login as root.") + + actions.append(DeprovisionAction(self.osutil.del_root_password)) + + def del_user(self, warnings, actions): + + try: + ovfenv = self.protocol_util.get_ovf_env() + except ProtocolError: + warnings.append("WARNING! ovf-env.xml is not found.") + warnings.append("WARNING! Skip delete user.") + return + + username = ovfenv.username + warnings.append(("WARNING! {0} account and entire home directory " + "will be deleted.").format(username)) + actions.append(DeprovisionAction(self.osutil.del_account, + [username])) + + + def regen_ssh_host_key(self, warnings, actions): + warnings.append("WARNING! All SSH host key pairs will be deleted.") + actions.append(DeprovisionAction(shellutil.run, + ['rm -f /etc/ssh/ssh_host_*key*'])) + + def stop_agent_service(self, warnings, actions): + warnings.append("WARNING! The waagent service will be stopped.") + actions.append(DeprovisionAction(self.osutil.stop_agent_service)) + + def del_files(self, warnings, actions): + files_to_del = ['/root/.bash_history', '/var/log/waagent.log'] + actions.append(DeprovisionAction(fileutil.rm_files, files_to_del)) + + def del_dhcp_lease(self, warnings, actions): + warnings.append("WARNING! 
Cached DHCP leases will be deleted.") + dirs_to_del = ["/var/lib/dhclient", "/var/lib/dhcpcd", "/var/lib/dhcp"] + actions.append(DeprovisionAction(fileutil.rm_dirs, dirs_to_del)) + + # For Freebsd + actions.append(DeprovisionAction(fileutil.rm_files, ["/var/db/dhclient.leases.hn0"])) + + def del_lib_dir(self, warnings, actions): + dirs_to_del = [conf.get_lib_dir()] + actions.append(DeprovisionAction(fileutil.rm_dirs, dirs_to_del)) + + def reset_hostname(self, warnings, actions): + localhost = ["localhost.localdomain"] + actions.append(DeprovisionAction(self.osutil.set_hostname, + localhost)) + actions.append(DeprovisionAction(self.osutil.set_dhcp_hostname, + localhost)) + + def setup(self, deluser): + warnings = [] + actions = [] + + self.stop_agent_service(warnings, actions) + if conf.get_regenerate_ssh_host_key(): + self.regen_ssh_host_key(warnings, actions) + + self.del_dhcp_lease(warnings, actions) + self.reset_hostname(warnings, actions) + + if conf.get_delete_root_password(): + self.del_root_password(warnings, actions) + + self.del_lib_dir(warnings, actions) + self.del_files(warnings, actions) + + if deluser: + self.del_user(warnings, actions) + + return warnings, actions + + def run(self, force=False, deluser=False): + warnings, actions = self.setup(deluser) + for warning in warnings: + print(warning) + + if not force: + confirm = read_input("Do you want to proceed (y/n)") + if not confirm.lower().startswith('y'): + return + + for action in actions: + action.invoke() + + diff --git a/azurelinuxagent/pa/deprovision/factory.py b/azurelinuxagent/pa/deprovision/factory.py new file mode 100644 index 0000000..dd01633 --- /dev/null +++ b/azurelinuxagent/pa/deprovision/factory.py @@ -0,0 +1,36 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.utils.textutil import Version +from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ + DISTRO_FULL_NAME + +from .default import DeprovisionHandler +from .coreos import CoreOSDeprovisionHandler +from .ubuntu import UbuntuDeprovisionHandler + +def get_deprovision_handler(distro_name=DISTRO_NAME, + distro_version=DISTRO_VERSION, + distro_full_name=DISTRO_FULL_NAME): + if distro_name == "ubuntu": + return UbuntuDeprovisionHandler() + if distro_name == "coreos": + return CoreOSDeprovisionHandler() + + return DeprovisionHandler() + diff --git a/azurelinuxagent/pa/deprovision/ubuntu.py b/azurelinuxagent/pa/deprovision/ubuntu.py new file mode 100644 index 0000000..14f90de --- /dev/null +++ b/azurelinuxagent/pa/deprovision/ubuntu.py @@ -0,0 +1,47 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.pa.deprovision.default import DeprovisionHandler, \ + DeprovisionAction + +def del_resolv(): + if os.path.realpath('/etc/resolv.conf') != '/run/resolvconf/resolv.conf': + logger.info("resolvconf is not configured. Removing /etc/resolv.conf") + fileutil.rm_files('/etc/resolv.conf') + else: + logger.info("resolvconf is enabled; leaving /etc/resolv.conf intact") + fileutil.rm_files('/etc/resolvconf/resolv.conf.d/tail', + '/etc/resolvconf/resolv.conf.d/original') + + +class UbuntuDeprovisionHandler(DeprovisionHandler): + def __init__(self): + super(UbuntuDeprovisionHandler, self).__init__() + + def setup(self, deluser): + warnings, actions = super(UbuntuDeprovisionHandler, self).setup(deluser) + warnings.append("WARNING! Nameserver configuration in " + "/etc/resolvconf/resolv.conf.d/{tail,original} " + "will be deleted.") + actions.append(DeprovisionAction(del_resolv)) + return warnings, actions + diff --git a/azurelinuxagent/pa/provision/__init__.py b/azurelinuxagent/pa/provision/__init__.py new file mode 100644 index 0000000..05f75ae --- /dev/null +++ b/azurelinuxagent/pa/provision/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.pa.provision.factory import get_provision_handler diff --git a/azurelinuxagent/pa/provision/default.py b/azurelinuxagent/pa/provision/default.py new file mode 100644 index 0000000..b07c147 --- /dev/null +++ b/azurelinuxagent/pa/provision/default.py @@ -0,0 +1,196 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +""" +Provision handler +""" + +import os +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.conf as conf +from azurelinuxagent.common.event import add_event, WALAEventOperation +from azurelinuxagent.common.exception import ProvisionError, ProtocolError, \ + OSUtilError +from azurelinuxagent.common.protocol.restapi import ProvisionStatus +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.common.osutil import get_osutil +from azurelinuxagent.common.protocol import get_protocol_util + +CUSTOM_DATA_FILE="CustomData" + +class ProvisionHandler(object): + + def __init__(self): + self.osutil = get_osutil() + self.protocol_util = get_protocol_util() + + def run(self): + #If provision is not enabled, return + if not conf.get_provision_enabled(): + logger.info("Provisioning is disabled. Skip.") + return + + provisioned = os.path.join(conf.get_lib_dir(), "provisioned") + if os.path.isfile(provisioned): + return + + logger.info("Run provision handler.") + logger.info("Copy ovf-env.xml.") + try: + ovfenv = self.protocol_util.copy_ovf_env() + except ProtocolError as e: + self.report_event("Failed to copy ovf-env.xml: {0}".format(e)) + return + + self.protocol_util.get_protocol_by_file() + + self.report_not_ready("Provisioning", "Starting") + + try: + logger.info("Start provisioning") + self.provision(ovfenv) + fileutil.write_file(provisioned, "") + thumbprint = self.reg_ssh_host_key() + logger.info("Finished provisioning") + except ProvisionError as e: + logger.error("Provision failed: {0}", e) + self.report_not_ready("ProvisioningFailed", ustr(e)) + self.report_event(ustr(e)) + return + + self.report_ready(thumbprint) + self.report_event("Provision succeed", is_success=True) + + def reg_ssh_host_key(self): + keypair_type = conf.get_ssh_host_keypair_type() + if conf.get_regenerate_ssh_host_key(): + shellutil.run("rm -f /etc/ssh/ssh_host_*key*") + shellutil.run(("ssh-keygen -N '' -t {0} -f /etc/ssh/ssh_host_{1}_key" + "").format(keypair_type, keypair_type)) + thumbprint = self.get_ssh_host_key_thumbprint(keypair_type) + return thumbprint + + def get_ssh_host_key_thumbprint(self, keypair_type): + cmd = "ssh-keygen -lf /etc/ssh/ssh_host_{0}_key.pub".format(keypair_type) + ret = shellutil.run_get_output(cmd) + if ret[0] == 0: + return ret[1].rstrip().split()[1].replace(':', '') + else: + raise ProvisionError(("Failed to generate ssh host key: " + "ret={0}, out= {1}").format(ret[0], ret[1])) + + def provision(self, ovfenv): + logger.info("Handle ovf-env.xml.") + try: + logger.info("Set host name.") + self.osutil.set_hostname(ovfenv.hostname) + + logger.info("Publish host name.") + self.osutil.publish_hostname(ovfenv.hostname) + + self.config_user_account(ovfenv) + + self.save_customdata(ovfenv) + + if conf.get_delete_root_password(): + self.osutil.del_root_password() + + except OSUtilError as e: + raise ProvisionError("Failed to handle ovf-env.xml: {0}".format(e)) + + def config_user_account(self, ovfenv): + logger.info("Create user account if not exists") + self.osutil.useradd(ovfenv.username) + + if ovfenv.user_password is not None: + logger.info("Set user password.") + crypt_id = conf.get_password_cryptid() + salt_len = conf.get_password_crypt_salt_len() + self.osutil.chpasswd(ovfenv.username, ovfenv.user_password, + crypt_id=crypt_id, salt_len=salt_len) + + logger.info("Configure sudoer") + 
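# Passwordless sudo is granted only when no user password was provided (SSH-key-only login). +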
self.osutil.conf_sudoer(ovfenv.username, nopasswd=ovfenv.user_password is None) + + logger.info("Configure sshd") + self.osutil.conf_sshd(ovfenv.disable_ssh_password_auth) + + #Disable selinux temporary + sel = self.osutil.is_selinux_enforcing() + if sel: + self.osutil.set_selinux_enforce(0) + + self.deploy_ssh_pubkeys(ovfenv) + self.deploy_ssh_keypairs(ovfenv) + + if sel: + self.osutil.set_selinux_enforce(1) + + self.osutil.restart_ssh_service() + + def save_customdata(self, ovfenv): + customdata = ovfenv.customdata + if customdata is None: + return + + logger.info("Save custom data") + lib_dir = conf.get_lib_dir() + if conf.get_decode_customdata(): + customdata= self.osutil.decode_customdata(customdata) + + customdata_file = os.path.join(lib_dir, CUSTOM_DATA_FILE) + fileutil.write_file(customdata_file, customdata) + + if conf.get_execute_customdata(): + logger.info("Execute custom data") + os.chmod(customdata_file, 0o700) + shellutil.run(customdata_file) + + def deploy_ssh_pubkeys(self, ovfenv): + for pubkey in ovfenv.ssh_pubkeys: + logger.info("Deploy ssh public key.") + self.osutil.deploy_ssh_pubkey(ovfenv.username, pubkey) + + def deploy_ssh_keypairs(self, ovfenv): + for keypair in ovfenv.ssh_keypairs: + logger.info("Deploy ssh key pairs.") + self.osutil.deploy_ssh_keypair(ovfenv.username, keypair) + + def report_event(self, message, is_success=False): + add_event(name="WALA", message=message, is_success=is_success, + op=WALAEventOperation.Provision) + + def report_not_ready(self, sub_status, description): + status = ProvisionStatus(status="NotReady", subStatus=sub_status, + description=description) + try: + protocol = self.protocol_util.get_protocol() + protocol.report_provision_status(status) + except ProtocolError as e: + self.report_event(ustr(e)) + + def report_ready(self, thumbprint=None): + status = ProvisionStatus(status="Ready") + status.properties.certificateThumbprint = thumbprint + try: + protocol = self.protocol_util.get_protocol() + protocol.report_provision_status(status) + except ProtocolError as e: + self.report_event(ustr(e)) + diff --git a/azurelinuxagent/pa/provision/factory.py b/azurelinuxagent/pa/provision/factory.py new file mode 100644 index 0000000..9bbe35c --- /dev/null +++ b/azurelinuxagent/pa/provision/factory.py @@ -0,0 +1,32 @@ +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.utils.textutil import Version +from azurelinuxagent.common.version import DISTRO_NAME, DISTRO_VERSION, \ + DISTRO_FULL_NAME +from .default import ProvisionHandler +from .ubuntu import UbuntuProvisionHandler + +def get_provision_handler(distro_name=DISTRO_NAME, + distro_version=DISTRO_VERSION, + distro_full_name=DISTRO_FULL_NAME): + if distro_name == "ubuntu": + return UbuntuProvisionHandler() + + return ProvisionHandler() + diff --git a/azurelinuxagent/pa/provision/ubuntu.py b/azurelinuxagent/pa/provision/ubuntu.py new file mode 100644 index 0000000..c334f23 --- /dev/null +++ b/azurelinuxagent/pa/provision/ubuntu.py @@ -0,0 +1,98 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import os +import time +import azurelinuxagent.common.logger as logger +from azurelinuxagent.common.future import ustr +import azurelinuxagent.common.conf as conf +import azurelinuxagent.common.protocol.ovfenv as ovfenv +from azurelinuxagent.common.event import add_event, WALAEventOperation +from azurelinuxagent.common.exception import ProvisionError, ProtocolError +import azurelinuxagent.common.utils.shellutil as shellutil +import azurelinuxagent.common.utils.fileutil as fileutil +from azurelinuxagent.pa.provision.default import ProvisionHandler + +""" +On ubuntu image, provision could be disabled. 
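+In that case cloud-init performs the actual provisioning; this handler only waits for cloud-init to copy ovf-env.xml and to generate the SSH host key, then reports the VM as ready.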
+""" +class UbuntuProvisionHandler(ProvisionHandler): + def __init__(self): + super(UbuntuProvisionHandler, self).__init__() + + def run(self): + #If provision is enabled, run default provision handler + if conf.get_provision_enabled(): + super(UbuntuProvisionHandler, self).run() + return + + logger.info("Running Ubuntu provision handler") + provisioned = os.path.join(conf.get_lib_dir(), "provisioned") + if os.path.isfile(provisioned): + return + + logger.info("Waiting for cloud-init to copy ovf-env.xml.") + self.wait_for_ovfenv() + + protocol = self.protocol_util.get_protocol() + self.report_not_ready("Provisioning", "Starting") + logger.info("Sleep 15 seconds to prevent throttling") + time.sleep(15) #Sleep to prevent throttling + try: + logger.info("Wait for ssh host key to be generated.") + thumbprint = self.wait_for_ssh_host_key() + fileutil.write_file(provisioned, "") + logger.info("Finished provisioning") + + except ProvisionError as e: + logger.error("Provision failed: {0}", e) + self.report_not_ready("ProvisioningFailed", ustr(e)) + self.report_event(ustr(e)) + return + + self.report_ready(thumbprint) + self.report_event("Provision succeeded", is_success=True) + + def wait_for_ovfenv(self, max_retry=60): + """ + Wait for cloud-init to copy ovf-env.xml file from provision ISO + """ + for retry in range(0, max_retry): + try: + self.protocol_util.get_ovf_env() + return + except ProtocolError: + if retry < max_retry - 1: + logger.info("Wait for cloud-init to copy ovf-env.xml") + time.sleep(5) + raise ProvisionError("ovf-env.xml is not copied") + + def wait_for_ssh_host_key(self, max_retry=60): + """ + Wait for cloud-init to generate ssh host key + """ + keypair_type = conf.get_ssh_host_keypair_type() + path = '/etc/ssh/ssh_host_{0}_key.pub'.format(keypair_type) + for retry in range(0, max_retry): + if os.path.isfile(path): + return self.get_ssh_host_key_thumbprint(keypair_type) + if retry < max_retry - 1: + logger.info("Wait for ssh host key to be generated: {0}", path) + time.sleep(5) + raise ProvisionError("ssh host key is not generated.") diff --git a/azurelinuxagent/pa/rdma/__init__.py b/azurelinuxagent/pa/rdma/__init__.py new file mode 100644 index 0000000..dff0ba4 --- /dev/null +++ b/azurelinuxagent/pa/rdma/__init__.py @@ -0,0 +1,18 @@ +# Copyright 2016 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +from azurelinuxagent.pa.rdma.factory import get_rdma_handler diff --git a/azurelinuxagent/pa/rdma/centos.py b/azurelinuxagent/pa/rdma/centos.py new file mode 100644 index 0000000..c527e1b --- /dev/null +++ b/azurelinuxagent/pa/rdma/centos.py @@ -0,0 +1,203 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import glob +import os +import re +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.rdma import RDMAHandler + + +class CentOSRDMAHandler(RDMAHandler): + rdma_user_mode_package_name = 'microsoft-hyper-v-rdma' + rdma_kernel_mode_package_name = 'kmod-microsoft-hyper-v-rdma' + rdma_wrapper_package_name = 'msft-rdma-drivers' + + hyper_v_package_name = "hypervkvpd" + hyper_v_package_name_new = "microsoft-hyper-v" + + version_major = None + version_minor = None + + def __init__(self, distro_version): + v = distro_version.split('.') + if len(v) < 2: + raise Exception('Unexpected centos version: %s' % distro_version) + self.version_major, self.version_minor = v[0], v[1] + + def install_driver(self): + """ + Install the KVP daemon and the appropriate RDMA driver package for the + RDMA firmware. + """ + fw_version = RDMAHandler.get_rdma_version() + if not fw_version: + raise Exception('Cannot determine RDMA firmware version') + logger.info("RDMA: found firmware version: {0}".format(fw_version)) + fw_version = self.get_int_rdma_version(fw_version) + installed_pkg = self.get_rdma_package_info() + if installed_pkg: + logger.info( + 'RDMA: driver package present: {0}'.format(installed_pkg)) + if self.is_rdma_package_up_to_date(installed_pkg, fw_version): + logger.info('RDMA: driver package is up-to-date') + return + else: + logger.info('RDMA: driver package needs updating') + self.update_rdma_package(fw_version) + else: + logger.info('RDMA: driver package is NOT installed') + self.update_rdma_package(fw_version) + + def is_rdma_package_up_to_date(self, pkg, fw_version): + # Example match (pkg name, -, followed by 3 segments, fw_version and -): + # - pkg=microsoft-hyper-v-rdma-4.1.0.142-20160323.x86_64 + # - fw_version=142 + pattern = '{0}-\d\.\d\.\d\.({1})-'.format( + self.rdma_user_mode_package_name, fw_version) + return re.match(pattern, pkg) + + @staticmethod + def get_int_rdma_version(version): + s = version.split('.') + if len(s) == 0: + raise Exception('Unexpected RDMA firmware version: "%s"' % version) + return s[0] + + def get_rdma_package_info(self): + """ + Returns the installed rdma package name or None + """ + ret, output = shellutil.run_get_output( + 'rpm -q %s' % self.rdma_user_mode_package_name, chk_err=False) + if ret != 0: + return None + return output + + def update_rdma_package(self, fw_version): + logger.info("RDMA: updating RDMA packages") + self.refresh_repos() + self.force_install_package(self.rdma_wrapper_package_name) + self.install_rdma_drivers(fw_version) + + def force_install_package(self, pkg_name): + """ + Attempts to remove existing package and installs the package + """ + logger.info('RDMA: Force installing package: %s' % pkg_name) + if self.uninstall_package(pkg_name) != 0: + logger.info('RDMA: Erasing package failed but will continue') + if self.install_package(pkg_name) != 0: + raise Exception('Failed to install package "{0}"'.format(pkg_name)) + logger.info('RDMA: installation completed: %s' % pkg_name) + + @staticmethod + def 
uninstall_package(pkg_name): + return shellutil.run('yum erase -y -q {0}'.format(pkg_name)) + + @staticmethod + def install_package(pkg_name): + return shellutil.run('yum install -y -q {0}'.format(pkg_name)) + + def refresh_repos(self): + logger.info("RDMA: refreshing yum repos") + if shellutil.run('yum clean all') != 0: + raise Exception('Cleaning yum repositories failed') + if shellutil.run('yum updateinfo') != 0: + raise Exception('Failed to act on yum repo update information') + logger.info("RDMA: repositories refreshed") + + def install_rdma_drivers(self, fw_version): + """ + Installs the drivers from /opt/rdma/rhel[Major][Minor] directory, + particularly the microsoft-hyper-v-rdma-* kmod-* and (no debuginfo or + src). Tries to uninstall them first. + """ + pkg_dir = '/opt/microsoft/rdma/rhel{0}{1}'.format( + self.version_major, self.version_minor) + logger.info('RDMA: pkgs dir: {0}'.format(pkg_dir)) + if not os.path.isdir(pkg_dir): + raise Exception('RDMA packages directory %s is missing' % pkg_dir) + + pkgs = os.listdir(pkg_dir) + logger.info('RDMA: found %d files in package directory' % len(pkgs)) + + # Uninstal KVP daemon first (if exists) + self.uninstall_kvp_driver_package_if_exists() + + # Install kernel mode driver (kmod-microsoft-hyper-v-rdma-*) + kmod_pkg = self.get_file_by_pattern( + pkgs, "%s-\d\.\d\.\d\.+(%s)-\d{8}\.x86_64.rpm" % (self.rdma_kernel_mode_package_name, fw_version)) + if not kmod_pkg: + raise Exception("RDMA kernel mode package not found") + kmod_pkg_path = os.path.join(pkg_dir, kmod_pkg) + self.uninstall_pkg_and_install_from( + 'kernel mode', self.rdma_kernel_mode_package_name, kmod_pkg_path) + + # Install user mode driver (microsoft-hyper-v-rdma-*) + umod_pkg = self.get_file_by_pattern( + pkgs, "%s-\d\.\d\.\d\.+(%s)-\d{8}\.x86_64.rpm" % (self.rdma_user_mode_package_name, fw_version)) + if not umod_pkg: + raise Exception("RDMA user mode package not found") + umod_pkg_path = os.path.join(pkg_dir, umod_pkg) + self.uninstall_pkg_and_install_from( + 'user mode', self.rdma_user_mode_package_name, umod_pkg_path) + + logger.info("RDMA: driver packages installed") + self.load_driver_module() + if not self.is_driver_loaded(): + logger.info("RDMA: driver module is not loaded; reboot required") + self.reboot_system() + else: + logger.info("RDMA: kernel module is loaded") + + @staticmethod + def get_file_by_pattern(list, pattern): + for l in list: + if re.match(pattern, l): + return l + return None + + def uninstall_pkg_and_install_from(self, pkg_type, pkg_name, pkg_path): + logger.info( + "RDMA: Processing {0} driver: {1}".format(pkg_type, pkg_path)) + logger.info("RDMA: Try to uninstall existing version: %s" % pkg_name) + if self.uninstall_package(pkg_name) == 0: + logger.info("RDMA: Successfully uninstaled %s" % pkg_name) + logger.info( + "RDMA: Installing {0} package from {1}".format(pkg_type, pkg_path)) + if self.install_package(pkg_path) != 0: + raise Exception( + "Failed to install RDMA {0} package".format(pkg_type)) + + def uninstall_kvp_driver_package_if_exists(self): + kvp_pkgs = [self.hyper_v_package_name, + self.hyper_v_package_name_new] + + for kvp_pkg in kvp_pkgs: + if shellutil.run("rpm -q %s" % kvp_pkg, chk_err=False) != 0: + logger.info( + "RDMA: kvp package %s does not exist, skipping" % kvp_pkg) + else: + logger.info('RDMA: erasing kvp package "%s"' % kvp_pkg) + if shellutil.run("yum erase -q -y %s" % kvp_pkg, chk_err=False) == 0: + logger.info("RDMA: successfully erased package") + else: + logger.error("RDMA: failed to erase package") diff --git 
a/azurelinuxagent/pa/rdma/factory.py b/azurelinuxagent/pa/rdma/factory.py new file mode 100644 index 0000000..535b3d3 --- /dev/null +++ b/azurelinuxagent/pa/rdma/factory.py @@ -0,0 +1,41 @@ +# Copyright 2016 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import azurelinuxagent.common.logger as logger + +from azurelinuxagent.common.version import DISTRO_FULL_NAME, DISTRO_VERSION +from azurelinuxagent.common.rdma import RDMAHandler +from .suse import SUSERDMAHandler +from .centos import CentOSRDMAHandler + + +def get_rdma_handler( + distro_full_name=DISTRO_FULL_NAME, + distro_version=DISTRO_VERSION +): + """Return the handler object for RDMA driver handling""" + if ( + distro_full_name == 'SUSE Linux Enterprise Server' and + int(distro_version) > 11 + ): + return SUSERDMAHandler() + + if distro_full_name == 'CentOS Linux' or distro_full_name == 'CentOS': + return CentOSRDMAHandler(distro_version) + + logger.info("No RDMA handler exists for distro='{0}' version='{1}'", distro_full_name, distro_version) + return RDMAHandler() diff --git a/azurelinuxagent/pa/rdma/suse.py b/azurelinuxagent/pa/rdma/suse.py new file mode 100644 index 0000000..f0d8d0f --- /dev/null +++ b/azurelinuxagent/pa/rdma/suse.py @@ -0,0 +1,130 @@ +# Microsoft Azure Linux Agent +# +# Copyright 2014 Microsoft Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Requires Python 2.4+ and Openssl 1.0+ +# + +import glob +import os +import azurelinuxagent.common.logger as logger +import azurelinuxagent.common.utils.shellutil as shellutil +from azurelinuxagent.common.rdma import RDMAHandler + + +class SUSERDMAHandler(RDMAHandler): + + def install_driver(self): + """Install the appropriate driver package for the RDMA firmware""" + + fw_version = RDMAHandler.get_rdma_version() + if not fw_version: + error_msg = 'RDMA: Could not determine firmware version. ' + error_msg += 'Therefore, no driver will be installed.' 
+ logger.error(error_msg) + return + zypper_install = 'zypper -n in %s' + zypper_remove = 'zypper -n rm %s' + zypper_search = 'zypper se -s %s' + package_name = 'msft-rdma-kmp-default' + cmd = zypper_search % package_name + status, repo_package_info = shellutil.run_get_output(cmd) + driver_package_versions = [] + driver_package_installed = False + for entry in repo_package_info.split('\n'): + if package_name in entry: + sections = entry.split('|') + if len(sections) < 4: + error_msg = 'RDMA: Unexpected output from "%s": "%s"' + logger.error(error_msg % (cmd, entry)) + continue + installed = sections[0].strip() + version = sections[3].strip() + driver_package_versions.append(version) + if fw_version in version and installed == 'i': + info_msg = 'RDMA: Matching driver package "%s-%s" ' + info_msg += 'is already installed, nothing to do.' + logger.info(info_msg % (package_name, version)) + return True + if installed == 'i': + driver_package_installed = True + + # If we get here the driver package is installed but the + # version doesn't match or no package is installed + requires_reboot = False + if driver_package_installed: + # Unloading the particular driver with rmmod does not work + # We have to reboot after the new driver is installed + if self.is_driver_loaded(): + info_msg = 'RDMA: Currently loaded driver does not match the ' + info_msg += 'firmware implementation, reboot will be required.' + logger.info(info_msg) + requires_reboot = True + logger.info("RDMA: removing package %s" % package_name) + cmd = zypper_remove % package_name + shellutil.run(cmd) + logger.info("RDMA: removed package %s" % package_name) + + logger.info("RDMA: looking for fw version %s in packages" % fw_version) + for entry in driver_package_versions: + if fw_version not in entry: + logger.info("Package '%s' is not a match." % entry) + else: + logger.info("Package '%s' is a match. Installing." % entry) + complete_name = '%s-%s' % (package_name, entry) + cmd = zypper_install % complete_name + result = shellutil.run(cmd) + if result: + error_msg = 'RDMA: Failed install of package "%s" ' + error_msg += 'from available repositories.' + logger.error(error_msg % complete_name) + msg = 'RDMA: Successfully installed "%s" from ' + msg += 'configured repositories' + logger.info(msg % complete_name) + self.load_driver_module() + if requires_reboot: + self.reboot_system() + return True + else: + logger.info("RDMA: No suitable match in repos. 
Trying local.") + local_packages = glob.glob('/opt/microsoft/rdma/*.rpm') + for local_package in local_packages: + logger.info("Examining: %s" % local_package) + if local_package.endswith('.src.rpm'): + continue + if ( + package_name in local_package and + fw_version in local_package + ): + logger.info("RDMA: Installing: %s" % local_package) + cmd = zypper_install % local_package + result = shellutil.run(cmd) + if result: + error_msg = 'RDMA: Failed install of package "%s" ' + error_msg += 'from local package cache' + logger.error(error_msg % local_package) + break + msg = 'RDMA: Successfully installed "%s" from ' + msg += 'local package cache' + logger.info(msg % (local_package)) + self.load_driver_module() + if requires_reboot: + self.reboot_system() + return True + else: + error_msg = 'Unable to find driver package that matches ' + error_msg += 'RDMA firmware version "%s"' % fw_version + logger.error(error_msg) + return diff --git a/azurelinuxagent/protocol/__init__.py b/azurelinuxagent/protocol/__init__.py deleted file mode 100644 index 8c1bbdb..0000000 --- a/azurelinuxagent/protocol/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# diff --git a/azurelinuxagent/protocol/metadata.py b/azurelinuxagent/protocol/metadata.py deleted file mode 100644 index 8a1656f..0000000 --- a/azurelinuxagent/protocol/metadata.py +++ /dev/null @@ -1,195 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ - -import json -import shutil -import os -import time -from azurelinuxagent.exception import ProtocolError, HttpError -from azurelinuxagent.future import httpclient, ustr -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -import azurelinuxagent.utils.restutil as restutil -import azurelinuxagent.utils.textutil as textutil -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.utils.cryptutil import CryptUtil -from azurelinuxagent.protocol.restapi import * - -METADATA_ENDPOINT='169.254.169.254' -APIVERSION='2015-05-01-preview' -BASE_URI = "http://{0}/Microsoft.Compute/{1}?api-version={2}{3}" - -TRANSPORT_PRV_FILE_NAME = "V2TransportPrivate.pem" -TRANSPORT_CERT_FILE_NAME = "V2TransportCert.pem" - -#TODO remote workarround for azure stack -MAX_PING = 30 -RETRY_PING_INTERVAL = 10 - -def _add_content_type(headers): - if headers is None: - headers = {} - headers["content-type"] = "application/json" - return headers - -class MetadataProtocol(Protocol): - - def __init__(self, apiversion=APIVERSION, endpoint=METADATA_ENDPOINT): - self.apiversion = apiversion - self.endpoint = endpoint - self.identity_uri = BASE_URI.format(self.endpoint, "identity", - self.apiversion, "&$expand=*") - self.cert_uri = BASE_URI.format(self.endpoint, "certificates", - self.apiversion, "&$expand=*") - self.ext_uri = BASE_URI.format(self.endpoint, "extensionHandlers", - self.apiversion, "&$expand=*") - self.provision_status_uri = BASE_URI.format(self.endpoint, - "provisioningStatus", - self.apiversion, "") - self.vm_status_uri = BASE_URI.format(self.endpoint, "status/vmagent", - self.apiversion, "") - self.ext_status_uri = BASE_URI.format(self.endpoint, - "status/extensions/{0}", - self.apiversion, "") - self.event_uri = BASE_URI.format(self.endpoint, "status/telemetry", - self.apiversion, "") - - def _get_data(self, url, headers=None): - try: - resp = restutil.http_get(url, headers=headers) - except HttpError as e: - raise ProtocolError(ustr(e)) - - if resp.status != httpclient.OK: - raise ProtocolError("{0} - GET: {1}".format(resp.status, url)) - - data = resp.read() - etag = resp.getheader('ETag') - if data is None: - return None - data = json.loads(ustr(data, encoding="utf-8")) - return data, etag - - def _put_data(self, url, data, headers=None): - headers = _add_content_type(headers) - try: - resp = restutil.http_put(url, json.dumps(data), headers=headers) - except HttpError as e: - raise ProtocolError(ustr(e)) - if resp.status != httpclient.OK: - raise ProtocolError("{0} - PUT: {1}".format(resp.status, url)) - - def _post_data(self, url, data, headers=None): - headers = _add_content_type(headers) - try: - resp = restutil.http_post(url, json.dumps(data), headers=headers) - except HttpError as e: - raise ProtocolError(ustr(e)) - if resp.status != httpclient.CREATED: - raise ProtocolError("{0} - POST: {1}".format(resp.status, url)) - - def _get_trans_cert(self): - trans_crt_file = os.path.join(conf.get_lib_dir(), - TRANSPORT_CERT_FILE_NAME) - if not os.path.isfile(trans_crt_file): - raise ProtocolError("{0} is missing.".format(trans_crt_file)) - content = fileutil.read_file(trans_crt_file) - return textutil.get_bytes_from_pem(content) - - def detect(self): - self.get_vminfo() - trans_prv_file = os.path.join(conf.get_lib_dir(), - TRANSPORT_PRV_FILE_NAME) - trans_cert_file = os.path.join(conf.get_lib_dir(), - TRANSPORT_CERT_FILE_NAME) - cryptutil = CryptUtil(conf.get_openssl_cmd()) - cryptutil.gen_transport_cert(trans_prv_file, 
trans_cert_file) - - #"Install" the cert and private key to /var/lib/waagent - thumbprint = cryptutil.get_thumbprint_from_crt(trans_cert_file) - prv_file = os.path.join(conf.get_lib_dir(), - "{0}.prv".format(thumbprint)) - crt_file = os.path.join(conf.get_lib_dir(), - "{0}.crt".format(thumbprint)) - shutil.copyfile(trans_prv_file, prv_file) - shutil.copyfile(trans_cert_file, crt_file) - - - def get_vminfo(self): - vminfo = VMInfo() - data, etag = self._get_data(self.identity_uri) - set_properties("vminfo", vminfo, data) - return vminfo - - def get_certs(self): - #TODO download and save certs - return CertList() - - def get_ext_handlers(self): - headers = { - "x-ms-vmagent-public-x509-cert": self._get_trans_cert() - } - ext_list = ExtHandlerList() - data, etag = self._get_data(self.ext_uri, headers=headers) - set_properties("extensionHandlers", ext_list.extHandlers, data) - return ext_list, etag - - def get_ext_handler_pkgs(self, ext_handler): - ext_handler_pkgs = ExtHandlerPackageList() - data = None - for version_uri in ext_handler.versionUris: - try: - data, etag = self._get_data(version_uri.uri) - break - except ProtocolError as e: - logger.warn("Failed to get version uris: {0}", e) - logger.info("Retry getting version uris") - set_properties("extensionPackages", ext_handler_pkgs, data) - return ext_handler_pkgs - - def report_provision_status(self, provision_status): - validata_param('provisionStatus', provision_status, ProvisionStatus) - data = get_properties(provision_status) - self._put_data(self.provision_status_uri, data) - - def report_vm_status(self, vm_status): - validata_param('vmStatus', vm_status, VMStatus) - data = get_properties(vm_status) - #TODO code field is not implemented for metadata protocol yet. Remove it - handler_statuses = data['vmAgent']['extensionHandlers'] - for handler_status in handler_statuses: - try: - handler_status.pop('code', None) - except KeyError: - pass - - self._put_data(self.vm_status_uri, data) - - def report_ext_status(self, ext_handler_name, ext_name, ext_status): - validata_param('extensionStatus', ext_status, ExtensionStatus) - data = get_properties(ext_status) - uri = self.ext_status_uri.format(ext_name) - self._put_data(uri, data) - - def report_event(self, events): - #TODO disable telemetry for azure stack test - #validata_param('events', events, TelemetryEventList) - #data = get_properties(events) - #self._post_data(self.event_uri, data) - pass - diff --git a/azurelinuxagent/protocol/ovfenv.py b/azurelinuxagent/protocol/ovfenv.py deleted file mode 100644 index de6791c..0000000 --- a/azurelinuxagent/protocol/ovfenv.py +++ /dev/null @@ -1,113 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# -""" -Copy and parse ovf-env.xml from provisioning ISO and local cache -""" -import os -import re -import shutil -import xml.dom.minidom as minidom -import azurelinuxagent.logger as logger -from azurelinuxagent.exception import ProtocolError -from azurelinuxagent.future import ustr -import azurelinuxagent.utils.fileutil as fileutil -from azurelinuxagent.utils.textutil import parse_doc, findall, find, findtext - -OVF_VERSION = "1.0" -OVF_NAME_SPACE = "http://schemas.dmtf.org/ovf/environment/1" -WA_NAME_SPACE = "http://schemas.microsoft.com/windowsazure" - -def _validate_ovf(val, msg): - if val is None: - raise ProtocolError("Failed to parse OVF XML: {0}".format(msg)) - -class OvfEnv(object): - """ - Read, and process provisioning info from provisioning file OvfEnv.xml - """ - def __init__(self, xml_text): - if xml_text is None: - raise ValueError("ovf-env is None") - logger.verb("Load ovf-env.xml") - self.hostname = None - self.username = None - self.user_password = None - self.customdata = None - self.disable_ssh_password_auth = True - self.ssh_pubkeys = [] - self.ssh_keypairs = [] - self.parse(xml_text) - - def parse(self, xml_text): - """ - Parse xml tree, retreiving user and ssh key information. - Return self. - """ - wans = WA_NAME_SPACE - ovfns = OVF_NAME_SPACE - - xml_doc = parse_doc(xml_text) - - environment = find(xml_doc, "Environment", namespace=ovfns) - _validate_ovf(environment, "Environment not found") - - section = find(environment, "ProvisioningSection", namespace=wans) - _validate_ovf(section, "ProvisioningSection not found") - - version = findtext(environment, "Version", namespace=wans) - _validate_ovf(version, "Version not found") - - if version > OVF_VERSION: - logger.warn("Newer provisioning configuration detected. 
" - "Please consider updating waagent") - - conf_set = find(section, "LinuxProvisioningConfigurationSet", - namespace=wans) - _validate_ovf(conf_set, "LinuxProvisioningConfigurationSet not found") - - self.hostname = findtext(conf_set, "HostName", namespace=wans) - _validate_ovf(self.hostname, "HostName not found") - - self.username = findtext(conf_set, "UserName", namespace=wans) - _validate_ovf(self.username, "UserName not found") - - self.user_password = findtext(conf_set, "UserPassword", namespace=wans) - - self.customdata = findtext(conf_set, "CustomData", namespace=wans) - - auth_option = findtext(conf_set, "DisableSshPasswordAuthentication", - namespace=wans) - if auth_option is not None and auth_option.lower() == "true": - self.disable_ssh_password_auth = True - else: - self.disable_ssh_password_auth = False - - public_keys = findall(conf_set, "PublicKey", namespace=wans) - for public_key in public_keys: - path = findtext(public_key, "Path", namespace=wans) - fingerprint = findtext(public_key, "Fingerprint", namespace=wans) - value = findtext(public_key, "Value", namespace=wans) - self.ssh_pubkeys.append((path, fingerprint, value)) - - keypairs = findall(conf_set, "KeyPair", namespace=wans) - for keypair in keypairs: - path = findtext(keypair, "Path", namespace=wans) - fingerprint = findtext(keypair, "Fingerprint", namespace=wans) - self.ssh_keypairs.append((path, fingerprint)) - diff --git a/azurelinuxagent/protocol/restapi.py b/azurelinuxagent/protocol/restapi.py deleted file mode 100644 index fbd29ed..0000000 --- a/azurelinuxagent/protocol/restapi.py +++ /dev/null @@ -1,250 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ -# -import os -import copy -import re -import json -import xml.dom.minidom -import azurelinuxagent.logger as logger -from azurelinuxagent.exception import ProtocolError, HttpError -from azurelinuxagent.future import ustr -import azurelinuxagent.utils.restutil as restutil - -def validata_param(name, val, expected_type): - if val is None: - raise ProtocolError("{0} is None".format(name)) - if not isinstance(val, expected_type): - raise ProtocolError(("{0} type should be {1} not {2}" - "").format(name, expected_type, type(val))) - -def set_properties(name, obj, data): - if isinstance(obj, DataContract): - validata_param("Property '{0}'".format(name), data, dict) - for prob_name, prob_val in data.items(): - prob_full_name = "{0}.{1}".format(name, prob_name) - try: - prob = getattr(obj, prob_name) - except AttributeError: - logger.warn("Unknown property: {0}", prob_full_name) - continue - prob = set_properties(prob_full_name, prob, prob_val) - setattr(obj, prob_name, prob) - return obj - elif isinstance(obj, DataContractList): - validata_param("List '{0}'".format(name), data, list) - for item_data in data: - item = obj.item_cls() - item = set_properties(name, item, item_data) - obj.append(item) - return obj - else: - return data - -def get_properties(obj): - if isinstance(obj, DataContract): - data = {} - props = vars(obj) - for prob_name, prob in list(props.items()): - data[prob_name] = get_properties(prob) - return data - elif isinstance(obj, DataContractList): - data = [] - for item in obj: - item_data = get_properties(item) - data.append(item_data) - return data - else: - return obj - -class DataContract(object): - pass - -class DataContractList(list): - def __init__(self, item_cls): - self.item_cls = item_cls - -""" -Data contract between guest and host -""" -class VMInfo(DataContract): - def __init__(self, subscriptionId=None, vmName=None, containerId=None, - roleName=None, roleInstanceName=None, tenantName=None): - self.subscriptionId = subscriptionId - self.vmName = vmName - self.containerId = containerId - self.roleName = roleName - self.roleInstanceName = roleInstanceName - self.tenantName = tenantName - -class Cert(DataContract): - def __init__(self, name=None, thumbprint=None, certificateDataUri=None): - self.name = name - self.thumbprint = thumbprint - self.certificateDataUri = certificateDataUri - -class CertList(DataContract): - def __init__(self): - self.certificates = DataContractList(Cert) - -class Extension(DataContract): - def __init__(self, name=None, sequenceNumber=None, publicSettings=None, - protectedSettings=None, certificateThumbprint=None): - self.name = name - self.sequenceNumber = sequenceNumber - self.publicSettings = publicSettings - self.protectedSettings = protectedSettings - self.certificateThumbprint = certificateThumbprint - -class ExtHandlerProperties(DataContract): - def __init__(self): - self.version = None - self.upgradePolicy = None - self.state = None - self.extensions = DataContractList(Extension) - -class ExtHandlerVersionUri(DataContract): - def __init__(self): - self.uri = None - -class ExtHandler(DataContract): - def __init__(self, name=None): - self.name = name - self.properties = ExtHandlerProperties() - self.versionUris = DataContractList(ExtHandlerVersionUri) - -class ExtHandlerList(DataContract): - def __init__(self): - self.extHandlers = DataContractList(ExtHandler) - -class ExtHandlerPackageUri(DataContract): - def __init__(self, uri=None): - self.uri = uri - -class ExtHandlerPackage(DataContract): 
- def __init__(self, version = None): - self.version = version - self.uris = DataContractList(ExtHandlerPackageUri) - -class ExtHandlerPackageList(DataContract): - def __init__(self): - self.versions = DataContractList(ExtHandlerPackage) - -class VMProperties(DataContract): - def __init__(self, certificateThumbprint=None): - #TODO need to confirm the property name - self.certificateThumbprint = certificateThumbprint - -class ProvisionStatus(DataContract): - def __init__(self, status=None, subStatus=None, description=None): - self.status = status - self.subStatus = subStatus - self.description = description - self.properties = VMProperties() - -class ExtensionSubStatus(DataContract): - def __init__(self, name=None, status=None, code=None, message=None): - self.name = name - self.status = status - self.code = code - self.message = message - -class ExtensionStatus(DataContract): - def __init__(self, configurationAppliedTime=None, operation=None, - status=None, seq_no=None, code=None, message=None): - self.configurationAppliedTime = configurationAppliedTime - self.operation = operation - self.status = status - self.sequenceNumber = seq_no - self.code = code - self.message = message - self.substatusList = DataContractList(ExtensionSubStatus) - -class ExtHandlerStatus(DataContract): - def __init__(self, name=None, version=None, status=None, code=0, - message=None): - self.name = name - self.version = version - self.status = status - self.code = code - self.message = message - self.extensions = DataContractList(ustr) - -class VMAgentStatus(DataContract): - def __init__(self, version=None, status=None, message=None): - self.version = version - self.status = status - self.message = message - self.extensionHandlers = DataContractList(ExtHandlerStatus) - -class VMStatus(DataContract): - def __init__(self): - self.vmAgent = VMAgentStatus() - -class TelemetryEventParam(DataContract): - def __init__(self, name=None, value=None): - self.name = name - self.value = value - -class TelemetryEvent(DataContract): - def __init__(self, eventId=None, providerId=None): - self.eventId = eventId - self.providerId = providerId - self.parameters = DataContractList(TelemetryEventParam) - -class TelemetryEventList(DataContract): - def __init__(self): - self.events = DataContractList(TelemetryEvent) - -class Protocol(DataContract): - - def detect(self): - raise NotImplementedError() - - def get_vminfo(self): - raise NotImplementedError() - - def get_certs(self): - raise NotImplementedError() - - def get_ext_handlers(self): - raise NotImplementedError() - - def get_ext_handler_pkgs(self, extension): - raise NotImplementedError() - - def download_ext_handler_pkg(self, uri): - try: - resp = restutil.http_get(uri, chk_proxy=True) - if resp.status == restutil.httpclient.OK: - return resp.read() - except HttpError as e: - raise ProtocolError("Failed to download from: {0}".format(uri), e) - - def report_provision_status(self, provision_status): - raise NotImplementedError() - - def report_vm_status(self, vm_status): - raise NotImplementedError() - - def report_ext_status(self, ext_handler_name, ext_name, ext_status): - raise NotImplementedError() - - def report_event(self, event): - raise NotImplementedError() - diff --git a/azurelinuxagent/protocol/wire.py b/azurelinuxagent/protocol/wire.py deleted file mode 100644 index 7b5ffe8..0000000 --- a/azurelinuxagent/protocol/wire.py +++ /dev/null @@ -1,1155 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, 
Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ - -import os -import json -import re -import time -import traceback -import xml.sax.saxutils as saxutils -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -from azurelinuxagent.exception import ProtocolError, HttpError, \ - ProtocolNotFoundError -from azurelinuxagent.future import ustr, httpclient, bytebuffer -import azurelinuxagent.utils.restutil as restutil -from azurelinuxagent.utils.textutil import parse_doc, findall, find, findtext, \ - getattrib, gettext, remove_bom, \ - get_bytes_from_pem -import azurelinuxagent.utils.fileutil as fileutil -import azurelinuxagent.utils.shellutil as shellutil -from azurelinuxagent.utils.cryptutil import CryptUtil -from azurelinuxagent.protocol.restapi import * - -VERSION_INFO_URI = "http://{0}/?comp=versions" -GOAL_STATE_URI = "http://{0}/machine/?comp=goalstate" -HEALTH_REPORT_URI = "http://{0}/machine?comp=health" -ROLE_PROP_URI = "http://{0}/machine?comp=roleProperties" -TELEMETRY_URI = "http://{0}/machine?comp=telemetrydata" - -WIRE_SERVER_ADDR_FILE_NAME = "WireServer" -INCARNATION_FILE_NAME = "Incarnation" -GOAL_STATE_FILE_NAME = "GoalState.{0}.xml" -HOSTING_ENV_FILE_NAME = "HostingEnvironmentConfig.xml" -SHARED_CONF_FILE_NAME = "SharedConfig.xml" -CERTS_FILE_NAME = "Certificates.xml" -P7M_FILE_NAME = "Certificates.p7m" -PEM_FILE_NAME = "Certificates.pem" -EXT_CONF_FILE_NAME = "ExtensionsConfig.{0}.xml" -MANIFEST_FILE_NAME = "{0}.{1}.manifest.xml" -TRANSPORT_CERT_FILE_NAME = "TransportCert.pem" -TRANSPORT_PRV_FILE_NAME = "TransportPrivate.pem" - -PROTOCOL_VERSION = "2012-11-30" -ENDPOINT_FINE_NAME = "WireServer" - -SHORT_WAITING_INTERVAL = 1 # 1 second -LONG_WAITING_INTERVAL = 15 # 15 seconds - -class WireProtocolResourceGone(ProtocolError): - pass - -class WireProtocol(Protocol): - """Slim layer to adapte wire protocol data to metadata protocol interface""" - - def __init__(self, endpoint): - if endpoint is None: - raise ProtocolError("WireProtocl endpoint is None") - self.endpoint = endpoint - self.client = WireClient(self.endpoint) - - def detect(self): - self.client.check_wire_protocol_version() - - trans_prv_file = os.path.join(conf.get_lib_dir(), - TRANSPORT_PRV_FILE_NAME) - trans_cert_file = os.path.join(conf.get_lib_dir(), - TRANSPORT_CERT_FILE_NAME) - cryptutil = CryptUtil(conf.get_openssl_cmd()) - cryptutil.gen_transport_cert(trans_prv_file, trans_cert_file) - - self.client.update_goal_state(forced=True) - - def get_vminfo(self): - goal_state = self.client.get_goal_state() - hosting_env = self.client.get_hosting_env() - - vminfo = VMInfo() - vminfo.subscriptionId = None - vminfo.vmName = hosting_env.vm_name - vminfo.tenantName = hosting_env.deployment_name - vminfo.roleName = hosting_env.role_name - vminfo.roleInstanceName = goal_state.role_instance_id - vminfo.containerId = goal_state.container_id - return vminfo - - def get_certs(self): - certificates = self.client.get_certs() - return certificates.cert_list - - def get_ext_handlers(self): - logger.verb("Get 
extension handler config") - #Update goal state to get latest extensions config - self.client.update_goal_state() - goal_state = self.client.get_goal_state() - ext_conf = self.client.get_ext_conf() - #In wire protocol, incarnation is equivalent to ETag - return ext_conf.ext_handlers, goal_state.incarnation - - def get_ext_handler_pkgs(self, ext_handler): - logger.verb("Get extension handler package") - goal_state = self.client.get_goal_state() - man = self.client.get_ext_manifest(ext_handler, goal_state) - return man.pkg_list - - def report_provision_status(self, provision_status): - validata_param("provision_status", provision_status, ProvisionStatus) - - if provision_status.status is not None: - self.client.report_health(provision_status.status, - provision_status.subStatus, - provision_status.description) - if provision_status.properties.certificateThumbprint is not None: - thumbprint = provision_status.properties.certificateThumbprint - self.client.report_role_prop(thumbprint) - - def report_vm_status(self, vm_status): - validata_param("vm_status", vm_status, VMStatus) - self.client.status_blob.set_vm_status(vm_status) - self.client.upload_status_blob() - - def report_ext_status(self, ext_handler_name, ext_name, ext_status): - validata_param("ext_status", ext_status, ExtensionStatus) - self.client.status_blob.set_ext_status(ext_handler_name, ext_status) - - def report_event(self, events): - validata_param("events", events, TelemetryEventList) - self.client.report_event(events) - -def _build_role_properties(container_id, role_instance_id, thumbprint): - xml = (u"" - u"" - u"" - u"{0}" - u"" - u"" - u"{1}" - u"" - u"" - u"" - u"" - u"" - u"" - u"" - u"").format(container_id, role_instance_id, thumbprint) - return xml - -def _build_health_report(incarnation, container_id, role_instance_id, - status, substatus, description): - #Escape '&', '<' and '>' - description = saxutils.escape(ustr(description)) - detail = u'' - if substatus is not None: - substatus = saxutils.escape(ustr(substatus)) - detail = (u"
" - u"{0}" - u"{1}" - u"
").format(substatus, description) - xml = (u"" - u"" - u"{0}" - u"" - u"{1}" - u"" - u"" - u"{2}" - u"" - u"{3}" - u"{4}" - u"" - u"" - u"" - u"" - u"" - u"").format(incarnation, - container_id, - role_instance_id, - status, - detail) - return xml - -""" -Convert VMStatus object to status blob format -""" -def ga_status_to_v1(ga_status): - formatted_msg = { - 'lang' : 'en-US', - 'message' : ga_status.message - } - v1_ga_status = { - 'version' : ga_status.version, - 'status' : ga_status.status, - 'formattedMessage' : formatted_msg - } - return v1_ga_status - -def ext_substatus_to_v1(sub_status_list): - status_list = [] - for substatus in sub_status_list: - status = { - "name": substatus.name, - "status": substatus.status, - "code": substatus.code, - "formattedMessage":{ - "lang": "en-US", - "message": substatus.message - } - } - status_list.append(status) - return status_list - -def ext_status_to_v1(ext_name, ext_status): - if ext_status is None: - return None - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) - v1_sub_status = ext_substatus_to_v1(ext_status.substatusList) - v1_ext_status = { - "status":{ - "name": ext_name, - "configurationAppliedTime": ext_status.configurationAppliedTime, - "operation": ext_status.operation, - "status": ext_status.status, - "code": ext_status.code, - "formattedMessage": { - "lang":"en-US", - "message": ext_status.message - } - }, - "version": 1.0, - "timestampUTC": timestamp - } - if len(v1_sub_status) != 0: - v1_ext_status['substatus'] = v1_sub_status - return v1_ext_status - -def ext_handler_status_to_v1(handler_status, ext_statuses, timestamp): - v1_handler_status = { - 'handlerVersion' : handler_status.version, - 'handlerName' : handler_status.name, - 'status' : handler_status.status, - 'code': handler_status.code - } - if handler_status.message is not None: - v1_handler_status["formattedMessage"] = { - "lang":"en-US", - "message": handler_status.message - } - - if len(handler_status.extensions) > 0: - #Currently, no more than one extension per handler - ext_name = handler_status.extensions[0] - ext_status = ext_statuses.get(ext_name) - v1_ext_status = ext_status_to_v1(ext_name, ext_status) - if ext_status is not None and v1_ext_status is not None: - v1_handler_status["runtimeSettingsStatus"] = { - 'settingsStatus' : v1_ext_status, - 'sequenceNumber' : ext_status.sequenceNumber - } - return v1_handler_status - -def vm_status_to_v1(vm_status, ext_statuses): - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) - - v1_ga_status = ga_status_to_v1(vm_status.vmAgent) - v1_handler_status_list = [] - for handler_status in vm_status.vmAgent.extensionHandlers: - v1_handler_status = ext_handler_status_to_v1(handler_status, - ext_statuses, timestamp) - if v1_handler_status is not None: - v1_handler_status_list.append(v1_handler_status) - - v1_agg_status = { - 'guestAgentStatus': v1_ga_status, - 'handlerAggregateStatus' : v1_handler_status_list - } - v1_vm_status = { - 'version' : '1.0', - 'timestampUTC' : timestamp, - 'aggregateStatus' : v1_agg_status - } - return v1_vm_status - - -class StatusBlob(object): - def __init__(self, client): - self.vm_status = None - self.ext_statuses = {} - self.client = client - - def set_vm_status(self, vm_status): - validata_param("vmAgent", vm_status, VMStatus) - self.vm_status = vm_status - - def set_ext_status(self, ext_handler_name, ext_status): - validata_param("extensionStatus", ext_status, ExtensionStatus) - self.ext_statuses[ext_handler_name]= ext_status - - def to_json(self): - report = 
vm_status_to_v1(self.vm_status, self.ext_statuses) - return json.dumps(report) - - __storage_version__ = "2014-02-14" - - def upload(self, url): - #TODO upload extension only if content has changed - logger.verb("Upload status blob") - blob_type = self.get_blob_type(url) - - data = self.to_json() - try: - if blob_type == "BlockBlob": - self.put_block_blob(url, data) - elif blob_type == "PageBlob": - self.put_page_blob(url, data) - else: - raise ProtocolError("Unknown blob type: {0}".format(blob_type)) - except HttpError as e: - raise ProtocolError("Failed to upload status blob: {0}".format(e)) - - def get_blob_type(self, url): - #Check blob type - logger.verb("Check blob type.") - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) - try: - resp = self.client.call_storage_service(restutil.http_head, url, { - "x-ms-date" : timestamp, - 'x-ms-version' : self.__class__.__storage_version__ - }) - except HttpError as e: - raise ProtocolError((u"Failed to get status blob type: {0}" - u"").format(e)) - if resp is None or resp.status != httpclient.OK: - raise ProtocolError(("Failed to get status blob type: {0}" - "").format(resp.status)) - - blob_type = resp.getheader("x-ms-blob-type") - logger.verb("Blob type={0}".format(blob_type)) - return blob_type - - def put_block_blob(self, url, data): - logger.verb("Upload block blob") - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) - try: - resp = self.client.call_storage_service(restutil.http_put, url, - data, { - "x-ms-date" : timestamp, - "x-ms-blob-type" : "BlockBlob", - "Content-Length": ustr(len(data)), - "x-ms-version" : self.__class__.__storage_version__ - }) - except HttpError as e: - raise ProtocolError((u"Failed to upload block blob: {0}" - u"").format(e)) - if resp.status != httpclient.CREATED: - raise ProtocolError(("Failed to upload block blob: {0}" - "").format(resp.status)) - - def put_page_blob(self, url, data): - logger.verb("Replace old page blob") - - #Convert string into bytes - data=bytearray(data, encoding='utf-8') - timestamp = time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()) - - #Align to 512 bytes - page_blob_size = int((len(data) + 511) / 512) * 512 - try: - resp = self.client.call_storage_service(restutil.http_put, url, - "", { - "x-ms-date" : timestamp, - "x-ms-blob-type" : "PageBlob", - "Content-Length": "0", - "x-ms-blob-content-length" : ustr(page_blob_size), - "x-ms-version" : self.__class__.__storage_version__ - }) - except HttpError as e: - raise ProtocolError((u"Failed to clean up page blob: {0}" - u"").format(e)) - if resp.status != httpclient.CREATED: - raise ProtocolError(("Failed to clean up page blob: {0}" - "").format(resp.status)) - - if url.count("?") < 0: - url = "{0}?comp=page".format(url) - else: - url = "{0}&comp=page".format(url) - - logger.verb("Upload page blob") - page_max = 4 * 1024 * 1024 #Max page size: 4MB - start = 0 - end = 0 - while end < len(data): - end = min(len(data), start + page_max) - content_size = end - start - #Align to 512 bytes - page_end = int((end + 511) / 512) * 512 - buf_size = page_end - start - buf = bytearray(buf_size) - buf[0: content_size] = data[start: end] - try: - resp = self.client.call_storage_service(restutil.http_put, url, - bytebuffer(buf), { - "x-ms-date" : timestamp, - "x-ms-range" : "bytes={0}-{1}".format(start, page_end - 1), - "x-ms-page-write" : "update", - "x-ms-version" : self.__class__.__storage_version__, - "Content-Length": ustr(page_end - start) - }) - except HttpError as e: - raise ProtocolError((u"Failed to upload page blob: 
{0}" - u"").format(e)) - if resp is None or resp.status != httpclient.CREATED: - raise ProtocolError(("Failed to upload page blob: {0}" - "").format(resp.status)) - start = end - -def event_param_to_v1(param): - param_format = '' - param_type = type(param.value) - attr_type = "" - if param_type is int: - attr_type = 'mt:uint64' - elif param_type is str: - attr_type = 'mt:wstr' - elif ustr(param_type).count("'unicode'") > 0: - attr_type = 'mt:wstr' - elif param_type is bool: - attr_type = 'mt:bool' - elif param_type is float: - attr_type = 'mt:float64' - return param_format.format(param.name, saxutils.quoteattr(ustr(param.value)), - attr_type) - -def event_to_v1(event): - params = "" - for param in event.parameters: - params += event_param_to_v1(param) - event_str = ('' - '' - '').format(event.eventId, params) - return event_str - -class WireClient(object): - def __init__(self, endpoint): - logger.info("Wire server endpoint:{0}", endpoint) - self.endpoint = endpoint - self.goal_state = None - self.updated = None - self.hosting_env = None - self.shared_conf = None - self.certs = None - self.ext_conf = None - self.last_request = 0 - self.req_count = 0 - self.status_blob = StatusBlob(self) - - def prevent_throttling(self): - """ - Try to avoid throttling of wire server - """ - now = time.time() - if now - self.last_request < 1: - logger.verb("Last request issued less than 1 second ago") - logger.verb("Sleep {0} second to avoid throttling.", - SHORT_WAITING_INTERVAL) - time.sleep(SHORT_WAITING_INTERVAL) - self.last_request = now - - self.req_count += 1 - if self.req_count % 3 == 0: - logger.verb("Sleep {0} second to avoid throttling.", - SHORT_WAITING_INTERVAL) - time.sleep(SHORT_WAITING_INTERVAL) - self.req_count = 0 - - def call_wireserver(self, http_req, *args, **kwargs): - """ - Call wire server. 
Handle throttling(403) and Resource Gone(410) - """ - self.prevent_throttling() - for retry in range(0, 3): - resp = http_req(*args, **kwargs) - if resp.status == httpclient.FORBIDDEN: - logger.warn("Sending too much request to wire server") - logger.info("Sleep {0} second to avoid throttling.", - LONG_WAITING_INTERVAL) - time.sleep(LONG_WAITING_INTERVAL) - elif resp.status == httpclient.GONE: - msg = args[0] if len(args) > 0 else "" - raise WireProtocolResourceGone(msg) - else: - return resp - raise ProtocolError(("Calling wire server failed: {0}" - "").format(resp.status)) - - def decode_config(self, data): - if data is None: - return None - data = remove_bom(data) - xml_text = ustr(data, encoding='utf-8') - return xml_text - - def fetch_config(self, uri, headers): - try: - resp = self.call_wireserver(restutil.http_get, uri, - headers=headers) - except HttpError as e: - raise ProtocolError(ustr(e)) - - if(resp.status != httpclient.OK): - raise ProtocolError("{0} - {1}".format(resp.status, uri)) - - return self.decode_config(resp.read()) - - def fetch_cache(self, local_file): - if not os.path.isfile(local_file): - raise ProtocolError("{0} is missing.".format(local_file)) - try: - return fileutil.read_file(local_file) - except IOError as e: - raise ProtocolError("Failed to read cache: {0}".format(e)) - - def save_cache(self, local_file, data): - try: - fileutil.write_file(local_file, data) - except IOError as e: - raise ProtocolError("Failed to write cache: {0}".format(e)) - - def call_storage_service(self, http_req, *args, **kwargs): - """ - Call storage service, handle SERVICE_UNAVAILABLE(503) - """ - for retry in range(0, 3): - resp = http_req(*args, **kwargs) - if resp.status == httpclient.SERVICE_UNAVAILABLE: - logger.warn("Storage service is not avaible temporaryly") - logger.info("Will retry later, in {0} seconds", - LONG_WAITING_INTERVAL) - time.sleep(LONG_WAITING_INTERVAL) - else: - return resp - raise ProtocolError(("Calling storage endpoint failed: {0}" - "").format(resp.status)) - - def fetch_manifest(self, version_uris): - for version_uri in version_uris: - logger.verb("Fetch ext handler manifest: {0}", version_uri.uri) - try: - resp = self.call_storage_service(restutil.http_get, - version_uri.uri, None, - chk_proxy=True) - except HttpError as e: - raise ProtocolError(ustr(e)) - - if resp.status == httpclient.OK: - return self.decode_config(resp.read()) - logger.warn("Failed to fetch ExtensionManifest: {0}, {1}", - resp.status, version_uri.uri) - logger.info("Will retry later, in {0} seconds", - LONG_WAITING_INTERVAL) - time.sleep(LONG_WAITING_INTERVAL) - raise ProtocolError(("Failed to fetch ExtensionManifest from " - "all sources")) - - - def update_hosting_env(self, goal_state): - if goal_state.hosting_env_uri is None: - raise ProtocolError("HostingEnvironmentConfig uri is empty") - local_file = os.path.join(conf.get_lib_dir(), HOSTING_ENV_FILE_NAME) - xml_text = self.fetch_config(goal_state.hosting_env_uri, - self.get_header()) - self.save_cache(local_file, xml_text) - self.hosting_env = HostingEnv(xml_text) - - def update_shared_conf(self, goal_state): - if goal_state.shared_conf_uri is None: - raise ProtocolError("SharedConfig uri is empty") - local_file = os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME) - xml_text = self.fetch_config(goal_state.shared_conf_uri, - self.get_header()) - self.save_cache(local_file, xml_text) - self.shared_conf = SharedConfig(xml_text) - - def update_certs(self, goal_state): - if goal_state.certs_uri is None: - return - local_file = 
os.path.join(conf.get_lib_dir(), CERTS_FILE_NAME) - xml_text = self.fetch_config(goal_state.certs_uri, - self.get_header_for_cert()) - self.save_cache(local_file, xml_text) - self.certs = Certificates(self, xml_text) - - def update_ext_conf(self, goal_state): - if goal_state.ext_uri is None: - logger.info("ExtensionsConfig.xml uri is empty") - self.ext_conf = ExtensionsConfig(None) - return - incarnation = goal_state.incarnation - local_file = os.path.join(conf.get_lib_dir(), - EXT_CONF_FILE_NAME.format(incarnation)) - xml_text = self.fetch_config(goal_state.ext_uri, self.get_header()) - self.save_cache(local_file, xml_text) - self.ext_conf = ExtensionsConfig(xml_text) - - def update_goal_state(self, forced=False, max_retry=3): - uri = GOAL_STATE_URI.format(self.endpoint) - xml_text = self.fetch_config(uri, self.get_header()) - goal_state = GoalState(xml_text) - - incarnation_file = os.path.join(conf.get_lib_dir(), - INCARNATION_FILE_NAME) - - if not forced: - last_incarnation = None - if(os.path.isfile(incarnation_file)): - last_incarnation = fileutil.read_file(incarnation_file) - new_incarnation = goal_state.incarnation - if last_incarnation is not None and \ - last_incarnation == new_incarnation: - #Goalstate is not updated. - return - - #Start updating goalstate, retry on 410 - for retry in range(0, max_retry): - try: - self.goal_state = goal_state - file_name = GOAL_STATE_FILE_NAME.format(goal_state.incarnation) - goal_state_file = os.path.join(conf.get_lib_dir(), file_name) - self.save_cache(goal_state_file, xml_text) - self.save_cache(incarnation_file, goal_state.incarnation) - self.update_hosting_env(goal_state) - self.update_shared_conf(goal_state) - self.update_certs(goal_state) - self.update_ext_conf(goal_state) - return - except WireProtocolResourceGone: - logger.info("Incarnation is out of date. 
Update goalstate.") - xml_text = self.fetch_config(uri, self.get_header()) - goal_state = GoalState(xml_text) - - raise ProtocolError("Exceeded max retry updating goal state") - - def get_goal_state(self): - if(self.goal_state is None): - incarnation_file = os.path.join(conf.get_lib_dir(), - INCARNATION_FILE_NAME) - incarnation = self.fetch_cache(incarnation_file) - - file_name = GOAL_STATE_FILE_NAME.format(incarnation) - goal_state_file = os.path.join(conf.get_lib_dir(), file_name) - xml_text = self.fetch_cache(goal_state_file) - self.goal_state = GoalState(xml_text) - return self.goal_state - - def get_hosting_env(self): - if(self.hosting_env is None): - local_file = os.path.join(conf.get_lib_dir(), HOSTING_ENV_FILE_NAME) - xml_text = self.fetch_cache(local_file) - self.hosting_env = HostingEnv(xml_text) - return self.hosting_env - - def get_shared_conf(self): - if(self.shared_conf is None): - local_file = os.path.join(conf.get_lib_dir(), SHARED_CONF_FILE_NAME) - xml_text = self.fetch_cache(local_file) - self.shared_conf = SharedConfig(xml_text) - return self.shared_conf - - def get_certs(self): - if(self.certs is None): - local_file = os.path.join(conf.get_lib_dir(), CERTS_FILE_NAME) - xml_text = self.fetch_cache(local_file) - self.certs = Certificates(self, xml_text) - if self.certs is None: - return None - return self.certs - - def get_ext_conf(self): - if(self.ext_conf is None): - goal_state = self.get_goal_state() - if goal_state.ext_uri is None: - self.ext_conf = ExtensionsConfig(None) - else: - local_file = EXT_CONF_FILE_NAME.format(goal_state.incarnation) - local_file = os.path.join(conf.get_lib_dir(), local_file) - xml_text = self.fetch_cache(local_file) - self.ext_conf = ExtensionsConfig(xml_text) - return self.ext_conf - - def get_ext_manifest(self, ext_handler, goal_state): - local_file = MANIFEST_FILE_NAME.format(ext_handler.name, - goal_state.incarnation) - local_file = os.path.join(conf.get_lib_dir(), local_file) - xml_text = self.fetch_manifest(ext_handler.versionUris) - self.save_cache(local_file, xml_text) - return ExtensionManifest(xml_text) - - def check_wire_protocol_version(self): - uri = VERSION_INFO_URI.format(self.endpoint) - version_info_xml = self.fetch_config(uri, None) - version_info = VersionInfo(version_info_xml) - - preferred = version_info.get_preferred() - if PROTOCOL_VERSION == preferred: - logger.info("Wire protocol version:{0}", PROTOCOL_VERSION) - elif PROTOCOL_VERSION in version_info.get_supported(): - logger.info("Wire protocol version:{0}", PROTOCOL_VERSION) - logger.warn("Server prefered version:{0}", preferred) - else: - error = ("Agent supported wire protocol version: {0} was not " - "advised by Fabric.").format(PROTOCOL_VERSION) - raise ProtocolNotFoundError(error) - - def upload_status_blob(self): - ext_conf = self.get_ext_conf() - if ext_conf.status_upload_blob is not None: - self.status_blob.upload(ext_conf.status_upload_blob) - - def report_role_prop(self, thumbprint): - goal_state = self.get_goal_state() - role_prop = _build_role_properties(goal_state.container_id, - goal_state.role_instance_id, - thumbprint) - role_prop = role_prop.encode("utf-8") - role_prop_uri = ROLE_PROP_URI.format(self.endpoint) - headers = self.get_header_for_xml_content() - try: - resp = self.call_wireserver(restutil.http_post, role_prop_uri, - role_prop, headers = headers) - except HttpError as e: - raise ProtocolError((u"Failed to send role properties: {0}" - u"").format(e)) - if resp.status != httpclient.ACCEPTED: - raise ProtocolError((u"Failed to send role 
properties: {0}" - u", {1}").format(resp.status, resp.read())) - - def report_health(self, status, substatus, description): - goal_state = self.get_goal_state() - health_report = _build_health_report(goal_state.incarnation, - goal_state.container_id, - goal_state.role_instance_id, - status, - substatus, - description) - health_report = health_report.encode("utf-8") - health_report_uri = HEALTH_REPORT_URI.format(self.endpoint) - headers = self.get_header_for_xml_content() - try: - resp = self.call_wireserver(restutil.http_post, health_report_uri, - health_report, headers = headers) - except HttpError as e: - raise ProtocolError((u"Failed to send provision status: {0}" - u"").format(e)) - if resp.status != httpclient.OK: - raise ProtocolError((u"Failed to send provision status: {0}" - u", {1}").format(resp.status, resp.read())) - - def send_event(self, provider_id, event_str): - uri = TELEMETRY_URI.format(self.endpoint) - data_format = ('' - '' - '{1}' - '' - '') - data = data_format.format(provider_id, event_str) - try: - header = self.get_header_for_xml_content() - resp = self.call_wireserver(restutil.http_post, uri, data, header) - except HttpError as e: - raise ProtocolError("Failed to send events:{0}".format(e)) - - if resp.status != httpclient.OK: - logger.verb(resp.read()) - raise ProtocolError("Failed to send events:{0}".format(resp.status)) - - def report_event(self, event_list): - buf = {} - #Group events by providerId - for event in event_list.events: - if event.providerId not in buf: - buf[event.providerId] = "" - event_str = event_to_v1(event) - if len(event_str) >= 63 * 1024: - logger.warn("Single event too large: {0}", event_str[300:]) - continue - if len(buf[event.providerId] + event_str) >= 63 * 1024: - self.send_event(event.providerId, buf[event.providerId]) - buf[event.providerId] = "" - buf[event.providerId] = buf[event.providerId] + event_str - - #Send out all events left in buffer. - for provider_id in list(buf.keys()): - if len(buf[provider_id]) > 0: - self.send_event(provider_id, buf[provider_id]) - - def get_header(self): - return { - "x-ms-agent-name":"WALinuxAgent", - "x-ms-version":PROTOCOL_VERSION - } - - def get_header_for_xml_content(self): - return { - "x-ms-agent-name":"WALinuxAgent", - "x-ms-version":PROTOCOL_VERSION, - "Content-Type":"text/xml;charset=utf-8" - } - - def get_header_for_cert(self): - trans_cert_file = os.path.join(conf.get_lib_dir(), - TRANSPORT_CERT_FILE_NAME) - content = self.fetch_cache(trans_cert_file) - cert = get_bytes_from_pem(content) - return { - "x-ms-agent-name":"WALinuxAgent", - "x-ms-version":PROTOCOL_VERSION, - "x-ms-cipher-name": "DES_EDE3_CBC", - "x-ms-guest-agent-public-x509-cert":cert - } - -class VersionInfo(object): - def __init__(self, xml_text): - """ - Query endpoint server for wire protocol version. - Fail if our desired protocol version is not seen. 
- """ - logger.verb("Load Version.xml") - self.parse(xml_text) - - def parse(self, xml_text): - xml_doc = parse_doc(xml_text) - preferred = find(xml_doc, "Preferred") - self.preferred = findtext(preferred, "Version") - logger.info("Fabric preferred wire protocol version:{0}", self.preferred) - - self.supported = [] - supported = find(xml_doc, "Supported") - supported_version = findall(supported, "Version") - for node in supported_version: - version = gettext(node) - logger.verb("Fabric supported wire protocol version:{0}", version) - self.supported.append(version) - - def get_preferred(self): - return self.preferred - - def get_supported(self): - return self.supported - - -class GoalState(object): - - def __init__(self, xml_text): - if xml_text is None: - raise ValueError("GoalState.xml is None") - logger.verb("Load GoalState.xml") - self.incarnation = None - self.expected_state = None - self.hosting_env_uri = None - self.shared_conf_uri = None - self.certs_uri = None - self.ext_uri = None - self.role_instance_id = None - self.container_id = None - self.load_balancer_probe_port = None - self.parse(xml_text) - - def parse(self, xml_text): - """ - Request configuration data from endpoint server. - """ - self.xml_text = xml_text - xml_doc = parse_doc(xml_text) - self.incarnation = findtext(xml_doc, "Incarnation") - self.expected_state = findtext(xml_doc, "ExpectedState") - self.hosting_env_uri = findtext(xml_doc, "HostingEnvironmentConfig") - self.shared_conf_uri = findtext(xml_doc, "SharedConfig") - self.certs_uri = findtext(xml_doc, "Certificates") - self.ext_uri = findtext(xml_doc, "ExtensionsConfig") - role_instance = find(xml_doc, "RoleInstance") - self.role_instance_id = findtext(role_instance, "InstanceId") - container = find(xml_doc, "Container") - self.container_id = findtext(container, "ContainerId") - lbprobe_ports = find(xml_doc, "LBProbePorts") - self.load_balancer_probe_port = findtext(lbprobe_ports, "Port") - return self - - -class HostingEnv(object): - """ - parse Hosting enviromnet config and store in - HostingEnvironmentConfig.xml - """ - def __init__(self, xml_text): - if xml_text is None: - raise ValueError("HostingEnvironmentConfig.xml is None") - logger.verb("Load HostingEnvironmentConfig.xml") - self.vm_name = None - self.role_name = None - self.deployment_name = None - self.parse(xml_text) - - def parse(self, xml_text): - """ - parse and create HostingEnvironmentConfig.xml. - """ - self.xml_text = xml_text - xml_doc = parse_doc(xml_text) - incarnation = find(xml_doc, "Incarnation") - self.vm_name = getattrib(incarnation, "instance") - role = find(xml_doc, "Role") - self.role_name = getattrib(role, "name") - deployment = find(xml_doc, "Deployment") - self.deployment_name = getattrib(deployment, "name") - return self - -class SharedConfig(object): - """ - parse role endpoint server and goal state config. - """ - def __init__(self, xml_text): - logger.verb("Load SharedConfig.xml") - self.parse(xml_text) - - def parse(self, xml_text): - """ - parse and write configuration to file SharedConfig.xml. - """ - #Not used currently - return self - -class Certificates(object): - - """ - Object containing certificates of host and provisioned user. - """ - def __init__(self, client, xml_text): - logger.verb("Load Certificates.xml") - self.client = client - self.cert_list = CertList() - self.parse(xml_text) - - def parse(self, xml_text): - """ - Parse multiple certificates into seperate files. 
- """ - xml_doc = parse_doc(xml_text) - data = findtext(xml_doc, "Data") - if data is None: - return - - cryptutil = CryptUtil(conf.get_openssl_cmd()) - p7m_file = os.path.join(conf.get_lib_dir(), P7M_FILE_NAME) - p7m = ("MIME-Version:1.0\n" - "Content-Disposition: attachment; filename=\"{0}\"\n" - "Content-Type: application/x-pkcs7-mime; name=\"{1}\"\n" - "Content-Transfer-Encoding: base64\n" - "\n" - "{2}").format(p7m_file, p7m_file, data) - - self.client.save_cache(p7m_file, p7m) - - trans_prv_file = os.path.join(conf.get_lib_dir(), - TRANSPORT_PRV_FILE_NAME) - trans_cert_file = os.path.join(conf.get_lib_dir(), - TRANSPORT_CERT_FILE_NAME) - pem_file = os.path.join(conf.get_lib_dir(), PEM_FILE_NAME) - #decrypt certificates - cryptutil.decrypt_p7m(p7m_file, trans_prv_file, trans_cert_file, - pem_file) - - #The parsing process use public key to match prv and crt. - buf = [] - begin_crt = False - begin_prv = False - prvs = {} - thumbprints = {} - index = 0 - v1_cert_list = [] - with open(pem_file) as pem: - for line in pem.readlines(): - buf.append(line) - if re.match(r'[-]+BEGIN.*KEY[-]+', line): - begin_prv = True - elif re.match(r'[-]+BEGIN.*CERTIFICATE[-]+', line): - begin_crt = True - elif re.match(r'[-]+END.*KEY[-]+', line): - tmp_file = self.write_to_tmp_file(index, 'prv', buf) - pub = cryptutil.get_pubkey_from_prv(tmp_file) - prvs[pub] = tmp_file - buf = [] - index += 1 - begin_prv = False - elif re.match(r'[-]+END.*CERTIFICATE[-]+', line): - tmp_file = self.write_to_tmp_file(index, 'crt', buf) - pub = cryptutil.get_pubkey_from_crt(tmp_file) - thumbprint = cryptutil.get_thumbprint_from_crt(tmp_file) - thumbprints[pub] = thumbprint - #Rename crt with thumbprint as the file name - crt = "{0}.crt".format(thumbprint) - v1_cert_list.append({ - "name":None, - "thumbprint":thumbprint - }) - os.rename(tmp_file, os.path.join(conf.get_lib_dir(), crt)) - buf = [] - index += 1 - begin_crt = False - - #Rename prv key with thumbprint as the file name - for pubkey in prvs: - thumbprint = thumbprints[pubkey] - if thumbprint: - tmp_file = prvs[pubkey] - prv = "{0}.prv".format(thumbprint) - os.rename(tmp_file, os.path.join(conf.get_lib_dir(), prv)) - - for v1_cert in v1_cert_list: - cert = Cert() - set_properties("certs", cert, v1_cert) - self.cert_list.certificates.append(cert) - - def write_to_tmp_file(self, index, suffix, buf): - file_name = os.path.join(conf.get_lib_dir(), - "{0}.{1}".format(index, suffix)) - self.client.save_cache(file_name, "".join(buf)) - return file_name - - -class ExtensionsConfig(object): - """ - parse ExtensionsConfig, downloading and unpacking them to /var/lib/waagent. - Install if true, remove if it is set to false. - """ - - def __init__(self, xml_text): - logger.verb("Load ExtensionsConfig.xml") - self.ext_handlers = ExtHandlerList() - self.status_upload_blob = None - if xml_text is not None: - self.parse(xml_text) - - def parse(self, xml_text): - """ - Write configuration to file ExtensionsConfig.xml. 
- """ - xml_doc = parse_doc(xml_text) - plugins_list = find(xml_doc, "Plugins") - plugins = findall(plugins_list, "Plugin") - plugin_settings_list = find(xml_doc, "PluginSettings") - plugin_settings = findall(plugin_settings_list, "Plugin") - - for plugin in plugins: - ext_handler = self.parse_plugin(plugin) - self.ext_handlers.extHandlers.append(ext_handler) - self.parse_plugin_settings(ext_handler, plugin_settings) - - self.status_upload_blob = findtext(xml_doc, "StatusUploadBlob") - - def parse_plugin(self, plugin): - ext_handler = ExtHandler() - ext_handler.name = getattrib(plugin, "name") - ext_handler.properties.version = getattrib(plugin, "version") - ext_handler.properties.state = getattrib(plugin, "state") - - auto_upgrade = getattrib(plugin, "autoUpgrade") - if auto_upgrade is not None and auto_upgrade.lower() == "true": - ext_handler.properties.upgradePolicy = "auto" - else: - ext_handler.properties.upgradePolicy = "manual" - - location = getattrib(plugin, "location") - failover_location = getattrib(plugin, "failoverlocation") - for uri in [location, failover_location]: - version_uri = ExtHandlerVersionUri() - version_uri.uri = uri - ext_handler.versionUris.append(version_uri) - return ext_handler - - def parse_plugin_settings(self, ext_handler, plugin_settings): - if plugin_settings is None: - return - - name = ext_handler.name - version = ext_handler.properties.version - settings = [x for x in plugin_settings \ - if getattrib(x, "name") == name and \ - getattrib(x ,"version") == version] - - if settings is None or len(settings) == 0: - return - - runtime_settings = None - runtime_settings_node = find(settings[0], "RuntimeSettings") - seqNo = getattrib(runtime_settings_node, "seqNo") - runtime_settings_str = gettext(runtime_settings_node) - try: - runtime_settings = json.loads(runtime_settings_str) - except ValueError as e: - logger.error("Invalid extension settings") - return - - for plugin_settings_list in runtime_settings["runtimeSettings"]: - handler_settings = plugin_settings_list["handlerSettings"] - ext = Extension() - #There is no "extension name" in wire protocol. 
- #Put - ext.name = ext_handler.name - ext.sequenceNumber = seqNo - ext.publicSettings = handler_settings.get("publicSettings") - ext.protectedSettings = handler_settings.get("protectedSettings") - thumbprint = handler_settings.get("protectedSettingsCertThumbprint") - ext.certificateThumbprint = thumbprint - ext_handler.properties.extensions.append(ext) - -class ExtensionManifest(object): - def __init__(self, xml_text): - if xml_text is None: - raise ValueError("ExtensionManifest is None") - logger.verb("Load ExtensionManifest.xml") - self.pkg_list = ExtHandlerPackageList() - self.parse(xml_text) - - def parse(self, xml_text): - xml_doc = parse_doc(xml_text) - packages = findall(xml_doc, "Plugin") - for package in packages: - version = findtext(package, "Version") - uris = find(package, "Uris") - uri_list = findall(uris, "Uri") - uri_list = [gettext(x) for x in uri_list] - package = ExtHandlerPackage() - package.version = version - for uri in uri_list: - pkg_uri = ExtHandlerVersionUri() - pkg_uri.uri = uri - package.uris.append(pkg_uri) - self.pkg_list.versions.append(package) - diff --git a/azurelinuxagent/utils/__init__.py b/azurelinuxagent/utils/__init__.py deleted file mode 100644 index d9b82f5..0000000 --- a/azurelinuxagent/utils/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - diff --git a/azurelinuxagent/utils/cryptutil.py b/azurelinuxagent/utils/cryptutil.py deleted file mode 100644 index 5ee5637..0000000 --- a/azurelinuxagent/utils/cryptutil.py +++ /dev/null @@ -1,121 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import base64 -import struct -from azurelinuxagent.future import ustr, bytebuffer -from azurelinuxagent.exception import CryptError -import azurelinuxagent.utils.shellutil as shellutil - -class CryptUtil(object): - def __init__(self, openssl_cmd): - self.openssl_cmd = openssl_cmd - - def gen_transport_cert(self, prv_file, crt_file): - """ - Create ssl certificate for https communication with endpoint server. 
- """ - cmd = ("{0} req -x509 -nodes -subj /CN=LinuxTransport -days 32768 " - "-newkey rsa:2048 -keyout {1} " - "-out {2}").format(self.openssl_cmd, prv_file, crt_file) - shellutil.run(cmd) - - def get_pubkey_from_prv(self, file_name): - cmd = "{0} rsa -in {1} -pubout 2>/dev/null".format(self.openssl_cmd, - file_name) - pub = shellutil.run_get_output(cmd)[1] - return pub - - def get_pubkey_from_crt(self, file_name): - cmd = "{0} x509 -in {1} -pubkey -noout".format(self.openssl_cmd, - file_name) - pub = shellutil.run_get_output(cmd)[1] - return pub - - def get_thumbprint_from_crt(self, file_name): - cmd="{0} x509 -in {1} -fingerprint -noout".format(self.openssl_cmd, - file_name) - thumbprint = shellutil.run_get_output(cmd)[1] - thumbprint = thumbprint.rstrip().split('=')[1].replace(':', '').upper() - return thumbprint - - def decrypt_p7m(self, p7m_file, trans_prv_file, trans_cert_file, pem_file): - cmd = ("{0} cms -decrypt -in {1} -inkey {2} -recip {3} " - "| {4} pkcs12 -nodes -password pass: -out {5}" - "").format(self.openssl_cmd, p7m_file, trans_prv_file, - trans_cert_file, self.openssl_cmd, pem_file) - shellutil.run(cmd) - - def crt_to_ssh(self, input_file, output_file): - shellutil.run("ssh-keygen -i -m PKCS8 -f {0} >> {1}".format(input_file, - output_file)) - - def asn1_to_ssh(self, pubkey): - lines = pubkey.split("\n") - lines = [x for x in lines if not x.startswith("----")] - base64_encoded = "".join(lines) - try: - #TODO remove pyasn1 dependency - from pyasn1.codec.der import decoder as der_decoder - der_encoded = base64.b64decode(base64_encoded) - der_encoded = der_decoder.decode(der_encoded)[0][1] - key = der_decoder.decode(self.bits_to_bytes(der_encoded))[0] - n=key[0] - e=key[1] - keydata = bytearray() - keydata.extend(struct.pack('>I', len("ssh-rsa"))) - keydata.extend(b"ssh-rsa") - keydata.extend(struct.pack('>I', len(self.num_to_bytes(e)))) - keydata.extend(self.num_to_bytes(e)) - keydata.extend(struct.pack('>I', len(self.num_to_bytes(n)) + 1)) - keydata.extend(b"\0") - keydata.extend(self.num_to_bytes(n)) - keydata_base64 = base64.b64encode(bytebuffer(keydata)) - return ustr(b"ssh-rsa " + keydata_base64 + b"\n", - encoding='utf-8') - except ImportError as e: - raise CryptError("Failed to load pyasn1.codec.der") - - def num_to_bytes(self, num): - """ - Pack number into bytes. Retun as string. - """ - result = bytearray() - while num: - result.append(num & 0xFF) - num >>= 8 - result.reverse() - return result - - def bits_to_bytes(self, bits): - """ - Convert an array contains bits, [0,1] to a byte array - """ - index = 7 - byte_array = bytearray() - curr = 0 - for bit in bits: - curr = curr | (bit << index) - index = index - 1 - if index == -1: - byte_array.append(curr) - curr = 0 - index = 7 - return bytes(byte_array) - diff --git a/azurelinuxagent/utils/fileutil.py b/azurelinuxagent/utils/fileutil.py deleted file mode 100644 index 5369a7c..0000000 --- a/azurelinuxagent/utils/fileutil.py +++ /dev/null @@ -1,187 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -""" -File operation util functions -""" - -import os -import re -import shutil -import pwd -import tempfile -import azurelinuxagent.logger as logger -from azurelinuxagent.future import ustr -import azurelinuxagent.utils.textutil as textutil - -def read_file(filepath, asbin=False, remove_bom=False, encoding='utf-8'): - """ - Read and return contents of 'filepath'. - """ - mode = 'rb' - with open(filepath, mode) as in_file: - data = in_file.read() - if data is None: - return None - - if asbin: - return data - - if remove_bom: - #Remove bom on bytes data before it is converted into string. - data = textutil.remove_bom(data) - data = ustr(data, encoding=encoding) - return data - -def write_file(filepath, contents, asbin=False, encoding='utf-8', append=False): - """ - Write 'contents' to 'filepath'. - """ - mode = "ab" if append else "wb" - data = contents - if not asbin: - data = contents.encode(encoding) - with open(filepath, mode) as out_file: - out_file.write(data) - -def append_file(filepath, contents, asbin=False, encoding='utf-8'): - """ - Append 'contents' to 'filepath'. - """ - write_file(filepath, contents, asbin=asbin, encoding=encoding, append=True) - -def replace_file(filepath, contents): - """ - Write 'contents' to 'filepath' by creating a temp file, - and replacing original. - """ - handle, temp = tempfile.mkstemp(dir=os.path.dirname(filepath)) - #if type(contents) == str: - #contents=contents.encode('latin-1') - try: - os.write(handle, contents) - except IOError as err: - logger.error('Write to file {0}, Exception is {1}', filepath, err) - return 1 - finally: - os.close(handle) - - try: - os.rename(temp, filepath) - except IOError as err: - logger.info('Rename {0} to {1}, Exception is {2}', temp, filepath, err) - logger.info('Remove original file and retry') - try: - os.remove(filepath) - except IOError as err: - logger.error('Remove {0}, Exception is {1}', temp, filepath, err) - - try: - os.rename(temp, filepath) - except IOError as err: - logger.error('Rename {0} to {1}, Exception is {2}', temp, filepath, - err) - return 1 - return 0 - - -def base_name(path): - head, tail = os.path.split(path) - return tail - -def get_line_startingwith(prefix, filepath): - """ - Return line from 'filepath' if the line startswith 'prefix' - """ - for line in read_file(filepath).split('\n'): - if line.startswith(prefix): - return line - return None - -#End File operation util functions - -def mkdir(dirpath, mode=None, owner=None): - if not os.path.isdir(dirpath): - os.makedirs(dirpath) - if mode is not None: - chmod(dirpath, mode) - if owner is not None: - chowner(dirpath, owner) - -def chowner(path, owner): - owner_info = pwd.getpwnam(owner) - os.chown(path, owner_info[2], owner_info[3]) - -def chmod(path, mode): - os.chmod(path, mode) - -def rm_files(*args): - for path in args: - if os.path.isfile(path): - os.remove(path) - -def rm_dirs(*args): - """ - Remove all the contents under the directry - """ - for dir_name in args: - if os.path.isdir(dir_name): - for item in os.listdir(dir_name): - path = os.path.join(dir_name, item) - if os.path.isfile(path): - os.remove(path) - elif os.path.isdir(path): - shutil.rmtree(path) - -def update_conf_file(path, line_start, val, chk_err=False): - conf = [] - if not os.path.isfile(path) and chk_err: - raise IOError("Can't find config file:{0}".format(path)) - conf = read_file(path).split('\n') - conf = [x 
for x in conf if not x.startswith(line_start)] - conf.append(val) - replace_file(path, '\n'.join(conf)) - -def search_file(target_dir_name, target_file_name): - for root, dirs, files in os.walk(target_dir_name): - for file_name in files: - if file_name == target_file_name: - return os.path.join(root, file_name) - return None - -def chmod_tree(path, mode): - for root, dirs, files in os.walk(path): - for file_name in files: - os.chmod(os.path.join(root, file_name), mode) - -def findstr_in_file(file_path, pattern_str): - """ - Return match object if found in file. - """ - try: - pattern = re.compile(pattern_str) - for line in (open(file_path, 'r')).readlines(): - match = re.search(pattern, line) - if match: - return match - except: - raise - - return None - diff --git a/azurelinuxagent/utils/restutil.py b/azurelinuxagent/utils/restutil.py deleted file mode 100644 index 2e8b0be..0000000 --- a/azurelinuxagent/utils/restutil.py +++ /dev/null @@ -1,156 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import time -import platform -import os -import subprocess -import azurelinuxagent.conf as conf -import azurelinuxagent.logger as logger -from azurelinuxagent.exception import HttpError -from azurelinuxagent.future import httpclient, urlparse - -""" -REST api util functions -""" - -RETRY_WAITING_INTERVAL = 10 - -def _parse_url(url): - o = urlparse(url) - rel_uri = o.path - if o.fragment: - rel_uri = "{0}#{1}".format(rel_uri, o.fragment) - if o.query: - rel_uri = "{0}?{1}".format(rel_uri, o.query) - secure = False - if o.scheme.lower() == "https": - secure = True - return o.hostname, o.port, secure, rel_uri - -def get_http_proxy(): - """ - Get http_proxy and https_proxy from environment variables. - Username and password is not supported now. - """ - host = conf.get_httpproxy_host() - port = conf.get_httpproxy_port() - return (host, port) - -def _http_request(method, host, rel_uri, port=None, data=None, secure=False, - headers=None, proxy_host=None, proxy_port=None): - url, conn = None, None - if secure: - port = 443 if port is None else port - if proxy_host is not None and proxy_port is not None: - conn = httpclient.HTTPSConnection(proxy_host, proxy_port, timeout=10) - conn.set_tunnel(host, port) - #If proxy is used, full url is needed. - url = "https://{0}:{1}{2}".format(host, port, rel_uri) - else: - conn = httpclient.HTTPSConnection(host, port, timeout=10) - url = rel_uri - else: - port = 80 if port is None else port - if proxy_host is not None and proxy_port is not None: - conn = httpclient.HTTPConnection(proxy_host, proxy_port, timeout=10) - #If proxy is used, full url is needed. 
- url = "http://{0}:{1}{2}".format(host, port, rel_uri) - else: - conn = httpclient.HTTPConnection(host, port, timeout=10) - url = rel_uri - if headers == None: - conn.request(method, url, data) - else: - conn.request(method, url, data, headers) - resp = conn.getresponse() - return resp - -def http_request(method, url, data, headers=None, max_retry=3, chk_proxy=False): - """ - Sending http request to server - On error, sleep 10 and retry max_retry times. - """ - logger.verb("HTTP Req: {0} {1}", method, url) - logger.verb(" Data={0}", data) - logger.verb(" Header={0}", headers) - host, port, secure, rel_uri = _parse_url(url) - - #Check proxy - proxy_host, proxy_port = (None, None) - if chk_proxy: - proxy_host, proxy_port = get_http_proxy() - - #If httplib module is not built with ssl support. Fallback to http - if secure and not hasattr(httpclient, "HTTPSConnection"): - logger.warn("httplib is not built with ssl support") - secure = False - - #If httplib module doesn't support https tunnelling. Fallback to http - if secure and \ - proxy_host is not None and \ - proxy_port is not None and \ - not hasattr(httpclient.HTTPSConnection, "set_tunnel"): - logger.warn("httplib doesn't support https tunnelling(new in python 2.7)") - secure = False - - for retry in range(0, max_retry): - try: - resp = _http_request(method, host, rel_uri, port=port, data=data, - secure=secure, headers=headers, - proxy_host=proxy_host, proxy_port=proxy_port) - logger.verb("HTTP Resp: Status={0}", resp.status) - logger.verb(" Header={0}", resp.getheaders()) - return resp - except httpclient.HTTPException as e: - logger.warn('HTTPException {0}, args:{1}', e, repr(e.args)) - except IOError as e: - logger.warn('Socket IOError {0}, args:{1}', e, repr(e.args)) - - if retry < max_retry - 1: - logger.info("Retry={0}, {1} {2}", retry, method, url) - time.sleep(RETRY_WAITING_INTERVAL) - - if url is not None and len(url) > 100: - url_log = url[0: 100] #In case the url is too long - else: - url_log = url - raise HttpError("HTTP Err: {0} {1}".format(method, url_log)) - -def http_get(url, headers=None, max_retry=3, chk_proxy=False): - return http_request("GET", url, data=None, headers=headers, - max_retry=max_retry, chk_proxy=chk_proxy) - -def http_head(url, headers=None, max_retry=3, chk_proxy=False): - return http_request("HEAD", url, None, headers=headers, - max_retry=max_retry, chk_proxy=chk_proxy) - -def http_post(url, data, headers=None, max_retry=3, chk_proxy=False): - return http_request("POST", url, data, headers=headers, - max_retry=max_retry, chk_proxy=chk_proxy) - -def http_put(url, data, headers=None, max_retry=3, chk_proxy=False): - return http_request("PUT", url, data, headers=headers, - max_retry=max_retry, chk_proxy=chk_proxy) - -def http_delete(url, headers=None, max_retry=3, chk_proxy=False): - return http_request("DELETE", url, None, headers=headers, - max_retry=max_retry, chk_proxy=chk_proxy) - -#End REST api util functions diff --git a/azurelinuxagent/utils/shellutil.py b/azurelinuxagent/utils/shellutil.py deleted file mode 100644 index 98871a1..0000000 --- a/azurelinuxagent/utils/shellutil.py +++ /dev/null @@ -1,89 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# Requires Python 2.4+ and Openssl 1.0+ -# - -import platform -import os -import subprocess -from azurelinuxagent.future import ustr -import azurelinuxagent.logger as logger - -if not hasattr(subprocess,'check_output'): - def check_output(*popenargs, **kwargs): - r"""Backport from subprocess module from python 2.7""" - if 'stdout' in kwargs: - raise ValueError('stdout argument not allowed, ' - 'it will be overridden.') - process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs) - output, unused_err = process.communicate() - retcode = process.poll() - if retcode: - cmd = kwargs.get("args") - if cmd is None: - cmd = popenargs[0] - raise subprocess.CalledProcessError(retcode, cmd, output=output) - return output - - # Exception classes used by this module. - class CalledProcessError(Exception): - def __init__(self, returncode, cmd, output=None): - self.returncode = returncode - self.cmd = cmd - self.output = output - def __str__(self): - return ("Command '{0}' returned non-zero exit status {1}" - "").format(self.cmd, self.returncode) - - subprocess.check_output=check_output - subprocess.CalledProcessError=CalledProcessError - - -""" -Shell command util functions -""" -def run(cmd, chk_err=True): - """ - Calls run_get_output on 'cmd', returning only the return code. - If chk_err=True then errors will be reported in the log. - If chk_err=False then errors will be suppressed from the log. - """ - retcode,out=run_get_output(cmd,chk_err) - return retcode - -def run_get_output(cmd, chk_err=True, log_cmd=True): - """ - Wrapper for subprocess.check_output. - Execute 'cmd'. Returns return code and STDOUT, trapping expected exceptions. - Reports exceptions to Error if chk_err parameter is True - """ - if log_cmd: - logger.verb(u"run cmd '{0}'", cmd) - try: - output=subprocess.check_output(cmd,stderr=subprocess.STDOUT,shell=True) - output = ustr(output, encoding='utf-8', errors="backslashreplace") - except subprocess.CalledProcessError as e : - output = ustr(e.output, encoding='utf-8', errors="backslashreplace") - if chk_err: - if log_cmd: - logger.error(u"run cmd '{0}' failed", e.cmd) - logger.error(u"Error Code:{0}", e.returncode) - logger.error(u"Result:{0}", output) - return e.returncode, output - return 0, output - -#End shell command util functions diff --git a/azurelinuxagent/utils/textutil.py b/azurelinuxagent/utils/textutil.py deleted file mode 100644 index 851f98a..0000000 --- a/azurelinuxagent/utils/textutil.py +++ /dev/null @@ -1,236 +0,0 @@ -# Microsoft Azure Linux Agent -# -# Copyright 2014 Microsoft Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -# Requires Python 2.4+ and Openssl 1.0+ - -import crypt -import random -import string -import struct -import xml.dom.minidom as minidom -import sys -from distutils.version import LooseVersion - -def parse_doc(xml_text): - """ - Parse xml document from string - """ - #The minidom lib has some issue with unicode in python2. - #Encode the string into utf-8 first - xml_text = xml_text.encode('utf-8') - return minidom.parseString(xml_text) - -def findall(root, tag, namespace=None): - """ - Get all nodes by tag and namespace under Node root. - """ - if root is None: - return [] - - if namespace is None: - return root.getElementsByTagName(tag) - else: - return root.getElementsByTagNameNS(namespace, tag) - -def find(root, tag, namespace=None): - """ - Get first node by tag and namespace under Node root. - """ - nodes = findall(root, tag, namespace=namespace) - if nodes is not None and len(nodes) >= 1: - return nodes[0] - else: - return None - -def gettext(node): - """ - Get node text - """ - if node is None: - return None - - for child in node.childNodes: - if child.nodeType == child.TEXT_NODE: - return child.data - return None - -def findtext(root, tag, namespace=None): - """ - Get text of node by tag and namespace under Node root. - """ - node = find(root, tag, namespace=namespace) - return gettext(node) - -def getattrib(node, attr_name): - """ - Get attribute of xml node - """ - if node is not None: - return node.getAttribute(attr_name) - else: - return None - -def unpack(buf, offset, range): - """ - Unpack bytes into python values. - """ - result = 0 - for i in range: - result = (result << 8) | str_to_ord(buf[offset + i]) - return result - -def unpack_little_endian(buf, offset, length): - """ - Unpack little endian bytes into python values. - """ - return unpack(buf, offset, list(range(length - 1, -1, -1))) - -def unpack_big_endian(buf, offset, length): - """ - Unpack big endian bytes into python values. - """ - return unpack(buf, offset, list(range(0, length))) - -def hex_dump3(buf, offset, length): - """ - Dump range of buf in formatted hex. - """ - return ''.join(['%02X' % str_to_ord(char) for char in buf[offset:offset + length]]) - -def hex_dump2(buf): - """ - Dump buf in formatted hex. - """ - return hex_dump3(buf, 0, len(buf)) - -def is_in_range(a, low, high): - """ - Return True if 'a' in 'low' <= a >= 'high' - """ - return (a >= low and a <= high) - -def is_printable(ch): - """ - Return True if character is displayable. - """ - return (is_in_range(ch, str_to_ord('A'), str_to_ord('Z')) - or is_in_range(ch, str_to_ord('a'), str_to_ord('z')) - or is_in_range(ch, str_to_ord('0'), str_to_ord('9'))) - -def hex_dump(buffer, size): - """ - Return Hex formated dump of a 'buffer' of 'size'. - """ - if size < 0: - size = len(buffer) - result = "" - for i in range(0, size): - if (i % 16) == 0: - result += "%06X: " % i - byte = buffer[i] - if type(byte) == str: - byte = ord(byte.decode('latin1')) - result += "%02X " % byte - if (i & 15) == 7: - result += " " - if ((i + 1) % 16) == 0 or (i + 1) == size: - j = i - while ((j + 1) % 16) != 0: - result += " " - if (j & 7) == 7: - result += " " - j += 1 - result += " " - for j in range(i - (i % 16), i + 1): - byte=buffer[j] - if type(byte) == str: - byte = str_to_ord(byte.decode('latin1')) - k = '.' - if is_printable(byte): - k = chr(byte) - result += k - if (i + 1) != size: - result += "\n" - return result - -def str_to_ord(a): - """ - Allows indexing into a string or an array of integers transparently. - Generic utility function. 
- """ - if type(a) == type(b'') or type(a) == type(u''): - a = ord(a) - return a - -def compare_bytes(a, b, start, length): - for offset in range(start, start + length): - if str_to_ord(a[offset]) != str_to_ord(b[offset]): - return False - return True - -def int_to_ip4_addr(a): - """ - Build DHCP request string. - """ - return "%u.%u.%u.%u" % ((a >> 24) & 0xFF, - (a >> 16) & 0xFF, - (a >> 8) & 0xFF, - (a) & 0xFF) - -def hexstr_to_bytearray(a): - """ - Return hex string packed into a binary struct. - """ - b = b"" - for c in range(0, len(a) // 2): - b += struct.pack("B", int(a[c * 2:c * 2 + 2], 16)) - return b - -def set_ssh_config(config, name, val): - notfound = True - for i in range(0, len(config)): - if config[i].startswith(name): - config[i] = "{0} {1}".format(name, val) - notfound = False - elif config[i].startswith("Match"): - #Match block must be put in the end of sshd config - break - if notfound: - config.insert(i, "{0} {1}".format(name, val)) - return config - -def remove_bom(c): - if str_to_ord(c[0]) > 128 and str_to_ord(c[1]) > 128 and \ - str_to_ord(c[2]) > 128: - c = c[3:] - return c - -def gen_password_hash(password, crypt_id, salt_len): - collection = string.ascii_letters + string.digits - salt = ''.join(random.choice(collection) for _ in range(salt_len)) - salt = "${0}${1}".format(crypt_id, salt) - return crypt.crypt(password, salt) - -def get_bytes_from_pem(pem_str): - base64_bytes = "" - for line in pem_str.split('\n'): - if "----" not in line: - base64_bytes += line - return base64_bytes - - -Version = LooseVersion - -- cgit v1.2.3
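
For reference, the status-blob upload path in the wire.py removed by this patch writes page blobs in 4 MB chunks, rounding each write up to a 512-byte page boundary and zero-padding the tail. A minimal standalone Python sketch of that arithmetic (the helper name is illustrative, not part of the agent):

# Sketch of the 512-byte page alignment and 4 MB chunking used by
# StatusBlob.put_page_blob in the removed wire.py (illustrative only).
PAGE = 512
MAX_PAGE_PUT = 4 * 1024 * 1024          # largest single page-blob write

def page_ranges(data):
    """Yield (start, page_end, zero-padded buffer) tuples aligned to 512 bytes."""
    start = 0
    while start < len(data):
        end = min(len(data), start + MAX_PAGE_PUT)
        page_end = ((end + PAGE - 1) // PAGE) * PAGE    # round up to a page
        buf = bytearray(page_end - start)               # tail is zero padding
        buf[0:end - start] = data[start:end]
        yield start, page_end, buf
        start = end

# 1000 bytes of status data become one aligned 1024-byte write:
for start, page_end, buf in page_ranges(bytearray(b"x" * 1000)):
    print(start, page_end, len(buf))                    # 0 1024 1024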
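The telemetry path removed above groups events by providerId and flushes a batch to the wire server whenever appending the next serialized event would push it past roughly 63 KB; a single event at or above that size is dropped. A sketch of the batching rule, with `serialize` and `send` standing in for event_to_v1 and send_event:

# Sketch of the per-provider 63 KB batching in WireClient.report_event
# (illustrative; `serialize`/`send` stand in for event_to_v1/send_event).
MAX_BATCH = 63 * 1024

def batch_and_send(events, serialize, send):
    buffers = {}
    for event in events:
        chunk = serialize(event)
        if len(chunk) >= MAX_BATCH:
            continue                          # single event too large: skip it
        buf = buffers.get(event.providerId, "")
        if len(buf) + len(chunk) >= MAX_BATCH:
            send(event.providerId, buf)       # flush before it would overflow
            buf = ""
        buffers[event.providerId] = buf + chunk
    for provider_id, buf in buffers.items():  # flush the remainders
        if buf:
            send(provider_id, buf)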
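update_goal_state in the removed WireClient only re-downloads and re-parses the goal state when the incarnation returned by the wire server differs from the one cached under the lib directory, or when the caller forces a refresh. The comparison, isolated as a sketch that assumes a plain-text cache file:

import os

# Sketch of the incarnation check in WireClient.update_goal_state: a refresh
# is needed only when there is no cache, the caller forces it, or the
# incarnation has changed.
def goal_state_changed(incarnation_file, new_incarnation, forced=False):
    if forced or not os.path.isfile(incarnation_file):
        return True
    with open(incarnation_file) as f:
        last_incarnation = f.read()
    return last_incarnation != new_incarnation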
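check_wire_protocol_version accepts the agent's protocol version (2012-11-30) when it is the fabric's preferred version, tolerates it when it is merely listed as supported, and raises otherwise. The decision, as a small sketch:

# Sketch of the acceptance rule in WireClient.check_wire_protocol_version.
PROTOCOL_VERSION = "2012-11-30"

def negotiate(preferred, supported):
    """Return True if the agent's protocol version is usable."""
    if PROTOCOL_VERSION == preferred:
        return True                     # exact match with the fabric
    if PROTOCOL_VERSION in supported:
        return True                     # usable, though not preferred
    raise ValueError("protocol version {0} not advised by Fabric"
                     .format(PROTOCOL_VERSION))

# negotiate("2012-11-30", ["2010-12-15", "2012-11-30"])  ->  True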
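asn1_to_ssh in the removed cryptutil.py rebuilds an OpenSSH public key line by packing three length-prefixed fields (the literal "ssh-rsa", the exponent, and the modulus with a leading zero byte) and base64-encoding the result. Given the exponent and modulus as integers, the packing looks like this sketch:

import base64
import struct

# Sketch of the ssh-rsa packing done by CryptUtil.asn1_to_ssh: every field
# is a 4-byte big-endian length followed by the field bytes.
def int_to_bytes(num):
    out = bytearray()
    while num:
        out.append(num & 0xFF)
        num >>= 8
    out.reverse()
    return bytes(out)

def ssh_rsa_line(e, n):
    blob = bytearray()
    for field in (b"ssh-rsa", int_to_bytes(e), b"\x00" + int_to_bytes(n)):
        blob.extend(struct.pack(">I", len(field)))
        blob.extend(field)
    return b"ssh-rsa " + base64.b64encode(bytes(blob)) + b"\n"

# ssh_rsa_line(65537, modulus) yields a line suitable for authorized_keys.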
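set_ssh_config in the removed textutil.py rewrites an sshd_config directive in place when it already exists and otherwise inserts it before the first "Match" block, since conditional blocks must stay at the end of the file. A compact sketch of that rule:

# Sketch of the sshd_config editing rule in textutil.set_ssh_config.
def set_ssh_config(lines, name, value):
    new_line = "{0} {1}".format(name, value)
    for i, line in enumerate(lines):
        if line.startswith(name):
            lines[i] = new_line           # overwrite the existing directive
            return lines
        if line.startswith("Match"):
            lines.insert(i, new_line)     # keep Match blocks at the end
            return lines
    lines.append(new_line)                # no Match block: append
    return lines

# set_ssh_config(["Port 22", "Match User foo"], "PasswordAuthentication", "no")
# -> ["Port 22", "PasswordAuthentication no", "Match User foo"]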
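Finally, restutil.http_request wraps each request in a short retry loop: up to max_retry attempts, a 10-second sleep between them, and an error carrying a truncated URL if every attempt fails. A stripped-down sketch, with `do_request` standing in for the real connection handling:

import time

# Sketch of the retry loop in restutil.http_request (illustrative;
# `do_request` stands in for the actual HTTP connection handling).
RETRY_WAITING_INTERVAL = 10             # seconds between attempts

def request_with_retry(do_request, url, max_retry=3):
    for attempt in range(max_retry):
        try:
            return do_request(url)
        except IOError:                 # the agent also catches HTTPException
            if attempt < max_retry - 1:
                time.sleep(RETRY_WAITING_INTERVAL)
    raise RuntimeError("HTTP Err: {0}".format(url[:100]))   # truncate long URLs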