path: root/cloudinit/sources
author    Scott Moser <smoser@ubuntu.com>  2016-04-04 12:07:19 -0400
committer Scott Moser <smoser@ubuntu.com>  2016-04-04 12:07:19 -0400
commit    7d8a3194552387fa9e21216bcd9a3bfc76fa2b04 (patch)
tree      c8dc45b013208a4e5e09e6ade63b3b5994f80aa3 /cloudinit/sources
parent    93f5af9f5075a416c65c1d0350c374e16f32f0d5 (diff)
parent    210b041b2fead7a57af91f60a6f89d9e5aa1ed4a (diff)
merge with trunk
Diffstat (limited to 'cloudinit/sources')
-rw-r--r--  cloudinit/sources/DataSourceAltCloud.py                  |  39
-rw-r--r--  cloudinit/sources/DataSourceAzure.py                     | 324
-rw-r--r--  cloudinit/sources/DataSourceBigstep.py                   |  57
-rw-r--r--  cloudinit/sources/DataSourceCloudSigma.py                |  22
-rw-r--r--  cloudinit/sources/DataSourceCloudStack.py                |  80
-rw-r--r--  cloudinit/sources/DataSourceConfigDrive.py               | 145
-rw-r--r--  cloudinit/sources/DataSourceDigitalOcean.py              |  23
-rw-r--r--  cloudinit/sources/DataSourceEc2.py                       |  23
-rw-r--r--  cloudinit/sources/DataSourceGCE.py                       | 103
-rw-r--r--  cloudinit/sources/DataSourceMAAS.py                      | 213
-rw-r--r--  cloudinit/sources/DataSourceNoCloud.py                   | 131
-rw-r--r--  cloudinit/sources/DataSourceNone.py                      |   4
-rw-r--r--  cloudinit/sources/DataSourceOVF.py                       | 141
-rw-r--r--  cloudinit/sources/DataSourceOpenNebula.py                |  20
-rw-r--r--  cloudinit/sources/DataSourceOpenStack.py                 |   4
-rw-r--r--  cloudinit/sources/DataSourceSmartOS.py                   | 337
-rw-r--r--  cloudinit/sources/__init__.py                            |  62
-rw-r--r--  cloudinit/sources/helpers/azure.py                       | 278
-rw-r--r--  cloudinit/sources/helpers/openstack.py                   |  21
-rw-r--r--  cloudinit/sources/helpers/vmware/__init__.py             |  13
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/__init__.py         |  13
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/boot_proto.py       |  25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config.py           |  95
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_file.py      | 129
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_namespace.py |  25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_nic.py       | 247
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/config_source.py    |  23
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_error.py  |  24
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_event.py  |  27
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_state.py  |  25
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/guestcust_util.py   | 128
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/ipv4_mode.py        |  45
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic.py              | 147
-rw-r--r--  cloudinit/sources/helpers/vmware/imc/nic_base.py         | 154
34 files changed, 2549 insertions(+), 598 deletions(-)
diff --git a/cloudinit/sources/DataSourceAltCloud.py b/cloudinit/sources/DataSourceAltCloud.py
index 1e913a6e..cd61df31 100644
--- a/cloudinit/sources/DataSourceAltCloud.py
+++ b/cloudinit/sources/DataSourceAltCloud.py
@@ -40,9 +40,8 @@ LOG = logging.getLogger(__name__)
CLOUD_INFO_FILE = '/etc/sysconfig/cloud-info'
# Shell command lists
-CMD_DMI_SYSTEM = ['/usr/sbin/dmidecode', '--string', 'system-product-name']
CMD_PROBE_FLOPPY = ['/sbin/modprobe', 'floppy']
-CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--quiet', '--timeout=5']
+CMD_UDEVADM_SETTLE = ['/sbin/udevadm', 'settle', '--timeout=5']
META_DATA_NOT_SUPPORTED = {
'block-device-mapping': {},
@@ -100,11 +99,7 @@ class DataSourceAltCloud(sources.DataSource):
'''
Description:
Get the type for the cloud back end this instance is running on
- by examining the string returned by:
- dmidecode --string system-product-name
-
- On VMWare/vSphere dmidecode returns: RHEV Hypervisor
- On VMWare/vSphere dmidecode returns: VMware Virtual Platform
+ by examining the string returned by reading the dmi data.
Input:
None
@@ -117,26 +112,20 @@ class DataSourceAltCloud(sources.DataSource):
uname_arch = os.uname()[4]
if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmidecode in CMD_DMI_SYSTEM crashes kvm process
+ # Disabling because dmi data is not available on ARM processors
LOG.debug("Disabling AltCloud datasource on arm (LP: #1243287)")
return 'UNKNOWN'
- cmd = CMD_DMI_SYSTEM
- try:
- (cmd_out, _err) = util.subp(cmd)
- except ProcessExecutionError, _err:
- LOG.debug(('Failed command: %s\n%s') % \
- (' '.join(cmd), _err.message))
- return 'UNKNOWN'
- except OSError, _err:
- LOG.debug(('Failed command: %s\n%s') % \
- (' '.join(cmd), _err.message))
+ system_name = util.read_dmi_data("system-product-name")
+ if not system_name:
return 'UNKNOWN'
- if cmd_out.upper().startswith('RHEV'):
+ sys_name = system_name.upper()
+
+ if sys_name.startswith('RHEV'):
return 'RHEV'
- if cmd_out.upper().startswith('VMWARE'):
+ if sys_name.startswith('VMWARE'):
return 'VSPHERE'
return 'UNKNOWN'
@@ -211,11 +200,11 @@ class DataSourceAltCloud(sources.DataSource):
cmd = CMD_PROBE_FLOPPY
(cmd_out, _err) = util.subp(cmd)
LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
- except ProcessExecutionError, _err:
+ except ProcessExecutionError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
_err.message)
return False
- except OSError, _err:
+ except OSError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
_err.message)
return False
@@ -228,11 +217,11 @@ class DataSourceAltCloud(sources.DataSource):
cmd.append('--exit-if-exists=' + floppy_dev)
(cmd_out, _err) = util.subp(cmd)
LOG.debug(('Command: %s\nOutput%s') % (' '.join(cmd), cmd_out))
- except ProcessExecutionError, _err:
+ except ProcessExecutionError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
_err.message)
return False
- except OSError, _err:
+ except OSError as _err:
util.logexc(LOG, 'Failed command: %s\n%s', ' '.join(cmd),
_err.message)
return False
@@ -295,7 +284,7 @@ class DataSourceAltCloud(sources.DataSource):
# In the future 'dsmode' like behavior can be added to offer user
# the ability to run before networking.
datasources = [
- (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceAltCloud, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
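
The AltCloud change above replaces a dmidecode subprocess with util.read_dmi_data. A minimal standalone sketch of the resulting detection flow, with read_dmi_data stubbed out (the real helper lives in cloudinit.util and reads the host's DMI tables):

    # Sketch only: read_dmi_data is stubbed here; on a real system it
    # returns the DMI system-product-name (e.g. 'RHEV Hypervisor' on
    # RHEV, 'VMware Virtual Platform' on VMware/vSphere).
    def read_dmi_data(key):
        return 'VMware Virtual Platform'

    def get_cloud_type():
        system_name = read_dmi_data("system-product-name")
        if not system_name:
            return 'UNKNOWN'
        sys_name = system_name.upper()
        if sys_name.startswith('RHEV'):
            return 'RHEV'
        if sys_name.startswith('VMWARE'):
            return 'VSPHERE'
        return 'UNKNOWN'

    assert get_cloud_type() == 'VSPHERE'
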
diff --git a/cloudinit/sources/DataSourceAzure.py b/cloudinit/sources/DataSourceAzure.py
index 09bc196d..698f4cac 100644
--- a/cloudinit/sources/DataSourceAzure.py
+++ b/cloudinit/sources/DataSourceAzure.py
@@ -17,26 +17,30 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
+import contextlib
import crypt
import fnmatch
import os
import os.path
import time
+import xml.etree.ElementTree as ET
+
from xml.dom import minidom
from cloudinit import log as logging
from cloudinit.settings import PER_ALWAYS
from cloudinit import sources
from cloudinit import util
+from cloudinit.sources.helpers.azure import get_metadata_from_fabric
LOG = logging.getLogger(__name__)
DS_NAME = 'Azure'
DEFAULT_METADATA = {"instance-id": "iid-AZURE-NODE"}
AGENT_START = ['service', 'walinuxagent', 'start']
-BOUNCE_COMMAND = ['sh', '-xc',
+BOUNCE_COMMAND = [
+ 'sh', '-xc',
"i=$interface; x=0; ifdown $i || x=$?; ifup $i || x=$?; exit $x"]
-DATA_DIR_CLEAN_LIST = ['SharedConfig.xml']
BUILTIN_DS_CONFIG = {
'agent_command': AGENT_START,
@@ -53,9 +57,9 @@ BUILTIN_DS_CONFIG = {
BUILTIN_CLOUD_CONFIG = {
'disk_setup': {
- 'ephemeral0': {'table_type': 'mbr',
- 'layout': True,
- 'overwrite': False},
+ 'ephemeral0': {'table_type': 'gpt',
+ 'layout': [100],
+ 'overwrite': True},
},
'fs_setup': [{'filesystem': 'ext4',
'device': 'ephemeral0.1',
@@ -65,6 +69,40 @@ BUILTIN_CLOUD_CONFIG = {
DS_CFG_PATH = ['datasource', DS_NAME]
DEF_EPHEMERAL_LABEL = 'Temporary Storage'
+# The redacted password fails to meet password complexity requirements
+# so we can safely use this to mask/redact the password in the ovf-env.xml
+DEF_PASSWD_REDACTION = 'REDACTED'
+
+
+def get_hostname(hostname_command='hostname'):
+ return util.subp(hostname_command, capture=True)[0].strip()
+
+
+def set_hostname(hostname, hostname_command='hostname'):
+ util.subp([hostname_command, hostname])
+
+
+@contextlib.contextmanager
+def temporary_hostname(temp_hostname, cfg, hostname_command='hostname'):
+ """
+ Set a temporary hostname, restoring the previous hostname on exit.
+
+ Will have the value of the previous hostname when used as a context
+ manager, or None if the hostname was not changed.
+ """
+ policy = cfg['hostname_bounce']['policy']
+ previous_hostname = get_hostname(hostname_command)
+ if (not util.is_true(cfg.get('set_hostname')) or
+ util.is_false(policy) or
+ (previous_hostname == temp_hostname and policy != 'force')):
+ yield None
+ return
+ set_hostname(temp_hostname, hostname_command)
+ try:
+ yield previous_hostname
+ finally:
+ set_hostname(previous_hostname, hostname_command)
+
class DataSourceAzureNet(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
@@ -80,6 +118,54 @@ class DataSourceAzureNet(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
+ def get_metadata_from_agent(self):
+ temp_hostname = self.metadata.get('local-hostname')
+ hostname_command = self.ds_cfg['hostname_bounce']['hostname_command']
+ with temporary_hostname(temp_hostname, self.ds_cfg,
+ hostname_command=hostname_command) \
+ as previous_hostname:
+ if (previous_hostname is not None and
+ util.is_true(self.ds_cfg.get('set_hostname'))):
+ cfg = self.ds_cfg['hostname_bounce']
+ try:
+ perform_hostname_bounce(hostname=temp_hostname,
+ cfg=cfg,
+ prev_hostname=previous_hostname)
+ except Exception as e:
+ LOG.warn("Failed publishing hostname: %s", e)
+ util.logexc(LOG, "handling set_hostname failed")
+
+ try:
+ invoke_agent(self.ds_cfg['agent_command'])
+ except util.ProcessExecutionError:
+ # claim the datasource even if the command failed
+ util.logexc(LOG, "agent command '%s' failed.",
+ self.ds_cfg['agent_command'])
+
+ ddir = self.ds_cfg['data_dir']
+
+ fp_files = []
+ key_value = None
+ for pk in self.cfg.get('_pubkeys', []):
+ if pk.get('value', None):
+ key_value = pk['value']
+ LOG.debug("ssh authentication: using value from fabric")
+ else:
+ bname = str(pk['fingerprint'] + ".crt")
+ fp_files += [os.path.join(ddir, bname)]
+ LOG.debug("ssh authentication: "
+                          "using fingerprint from fabric")
+
+ missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
+ func=wait_for_files,
+ args=(fp_files,))
+ if len(missing):
+ LOG.warn("Did not find files, but going on: %s", missing)
+
+ metadata = {}
+ metadata['public-keys'] = key_value or pubkeys_from_crt_files(fp_files)
+ return metadata
+
def get_data(self):
# azure removes/ejects the cdrom containing the ovf-env.xml
# file on reboot. So, in order to successfully reboot we
@@ -124,77 +210,34 @@ class DataSourceAzureNet(sources.DataSource):
LOG.debug("using files cached in %s", ddir)
# azure / hyper-v provides random data here
- seed = util.load_file("/sys/firmware/acpi/tables/OEM0", quiet=True)
+ seed = util.load_file("/sys/firmware/acpi/tables/OEM0",
+ quiet=True, decode=False)
if seed:
self.metadata['random_seed'] = seed
# now update ds_cfg to reflect contents pass in config
user_ds_cfg = util.get_cfg_by_path(self.cfg, DS_CFG_PATH, {})
self.ds_cfg = util.mergemanydict([user_ds_cfg, self.ds_cfg])
- mycfg = self.ds_cfg
- ddir = mycfg['data_dir']
-
- if found != ddir:
- cached_ovfenv = util.load_file(
- os.path.join(ddir, 'ovf-env.xml'), quiet=True)
- if cached_ovfenv != files['ovf-env.xml']:
- # source was not walinux-agent's datadir, so we have to clean
- # up so 'wait_for_files' doesn't return early due to stale data
- cleaned = []
- for f in [os.path.join(ddir, f) for f in DATA_DIR_CLEAN_LIST]:
- if os.path.exists(f):
- util.del_file(f)
- cleaned.append(f)
- if cleaned:
- LOG.info("removed stale file(s) in '%s': %s",
- ddir, str(cleaned))
# walinux agent writes files world readable, but expects
# the directory to be protected.
- write_files(ddir, files, dirmode=0700)
-
- # handle the hostname 'publishing'
- try:
- handle_set_hostname(mycfg.get('set_hostname'),
- self.metadata.get('local-hostname'),
- mycfg['hostname_bounce'])
- except Exception as e:
- LOG.warn("Failed publishing hostname: %s", e)
- util.logexc(LOG, "handling set_hostname failed")
-
- try:
- invoke_agent(mycfg['agent_command'])
- except util.ProcessExecutionError:
- # claim the datasource even if the command failed
- util.logexc(LOG, "agent command '%s' failed.",
- mycfg['agent_command'])
-
- shcfgxml = os.path.join(ddir, "SharedConfig.xml")
- wait_for = [shcfgxml]
-
- fp_files = []
- for pk in self.cfg.get('_pubkeys', []):
- bname = str(pk['fingerprint'] + ".crt")
- fp_files += [os.path.join(ddir, bname)]
+ write_files(ddir, files, dirmode=0o700)
- missing = util.log_time(logfunc=LOG.debug, msg="waiting for files",
- func=wait_for_files,
- args=(wait_for + fp_files,))
- if len(missing):
- LOG.warn("Did not find files, but going on: %s", missing)
-
- if shcfgxml in missing:
- LOG.warn("SharedConfig.xml missing, using static instance-id")
+ if self.ds_cfg['agent_command'] == '__builtin__':
+ metadata_func = get_metadata_from_fabric
else:
- try:
- self.metadata['instance-id'] = iid_from_shared_config(shcfgxml)
- except ValueError as e:
- LOG.warn("failed to get instance id in %s: %s", shcfgxml, e)
+ metadata_func = self.get_metadata_from_agent
+ try:
+ fabric_data = metadata_func()
+ except Exception as exc:
+ LOG.info("Error communicating with Azure fabric; assume we aren't"
+ " on Azure.", exc_info=True)
+ return False
- pubkeys = pubkeys_from_crt_files(fp_files)
- self.metadata['public-keys'] = pubkeys
+ self.metadata['instance-id'] = util.read_dmi_data('system-uuid')
+ self.metadata.update(fabric_data)
- found_ephemeral = find_ephemeral_disk()
+ found_ephemeral = find_fabric_formatted_ephemeral_disk()
if found_ephemeral:
self.ds_cfg['disk_aliases']['ephemeral0'] = found_ephemeral
LOG.debug("using detected ephemeral0 of %s", found_ephemeral)
@@ -211,35 +254,42 @@ class DataSourceAzureNet(sources.DataSource):
def get_config_obj(self):
return self.cfg
+ def check_instance_id(self, sys_cfg):
+ # quickly (local check only) if self.instance_id is still valid
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
def count_files(mp):
return len(fnmatch.filter(os.listdir(mp), '*[!cdrom]*'))
-def find_ephemeral_part():
+def find_fabric_formatted_ephemeral_part():
"""
- Locate the default ephmeral0.1 device. This will be the first device
- that has a LABEL of DEF_EPHEMERAL_LABEL and is a NTFS device. If Azure
- gets more ephemeral devices, this logic will only identify the first
- such device.
+ Locate the first fabric formatted ephemeral device.
"""
- c_label_devs = util.find_devs_with("LABEL=%s" % DEF_EPHEMERAL_LABEL)
- c_fstype_devs = util.find_devs_with("TYPE=ntfs")
- for dev in c_label_devs:
- if dev in c_fstype_devs:
- return dev
+ potential_locations = ['/dev/disk/cloud/azure_resource-part1',
+ '/dev/disk/azure/resource-part1']
+ device_location = None
+ for potential_location in potential_locations:
+ if os.path.exists(potential_location):
+ device_location = potential_location
+ break
+ if device_location is None:
+ return None
+ ntfs_devices = util.find_devs_with("TYPE=ntfs")
+ real_device = os.path.realpath(device_location)
+ if real_device in ntfs_devices:
+ return device_location
return None
-def find_ephemeral_disk():
+def find_fabric_formatted_ephemeral_disk():
"""
Get the ephemeral disk.
"""
- part_dev = find_ephemeral_part()
- if part_dev and str(part_dev[-1]).isdigit():
- return part_dev[:-1]
- elif part_dev:
- return part_dev
+ part_dev = find_fabric_formatted_ephemeral_part()
+ if part_dev:
+ return part_dev.split('-')[0]
return None
@@ -253,7 +303,7 @@ def support_new_ephemeral(cfg):
new ephemeral device is detected, cloud-init overrides the default
frequency for both disk-setup and mounts for the current boot only.
"""
- device = find_ephemeral_part()
+ device = find_fabric_formatted_ephemeral_part()
if not device:
LOG.debug("no default fabric formated ephemeral0.1 found")
return None
@@ -298,39 +348,15 @@ def support_new_ephemeral(cfg):
return mod_list
-def handle_set_hostname(enabled, hostname, cfg):
- if not util.is_true(enabled):
- return
-
- if not hostname:
- LOG.warn("set_hostname was true but no local-hostname")
- return
-
- apply_hostname_bounce(hostname=hostname, policy=cfg['policy'],
- interface=cfg['interface'],
- command=cfg['command'],
- hostname_command=cfg['hostname_command'])
-
-
-def apply_hostname_bounce(hostname, policy, interface, command,
- hostname_command="hostname"):
+def perform_hostname_bounce(hostname, cfg, prev_hostname):
# set the hostname to 'hostname' if it is not already set to that.
# then, if policy is not off, bounce the interface using command
- prev_hostname = util.subp(hostname_command, capture=True)[0].strip()
-
- util.subp([hostname_command, hostname])
-
- msg = ("phostname=%s hostname=%s policy=%s interface=%s" %
- (prev_hostname, hostname, policy, interface))
-
- if util.is_false(policy):
- LOG.debug("pubhname: policy false, skipping [%s]", msg)
- return
-
- if prev_hostname == hostname and policy != "force":
- LOG.debug("pubhname: no change, policy != force. skipping. [%s]", msg)
- return
+ command = cfg['command']
+ interface = cfg['interface']
+ policy = cfg['policy']
+ msg = ("hostname=%s policy=%s interface=%s" %
+ (hostname, policy, interface))
env = os.environ.copy()
env['interface'] = interface
env['hostname'] = hostname
@@ -343,15 +369,16 @@ def apply_hostname_bounce(hostname, policy, interface, command,
shell = not isinstance(command, (list, tuple))
# capture=False, see comments in bug 1202758 and bug 1206164.
util.log_time(logfunc=LOG.debug, msg="publishing hostname",
- get_uptime=True, func=util.subp,
- kwargs={'args': command, 'shell': shell, 'capture': False,
- 'env': env})
+ get_uptime=True, func=util.subp,
+ kwargs={'args': command, 'shell': shell, 'capture': False,
+ 'env': env})
-def crtfile_to_pubkey(fname):
+def crtfile_to_pubkey(fname, data=None):
pipeline = ('openssl x509 -noout -pubkey < "$0" |'
'ssh-keygen -i -m PKCS8 -f /dev/stdin')
- (out, _err) = util.subp(['sh', '-c', pipeline, fname], capture=True)
+ (out, _err) = util.subp(['sh', '-c', pipeline, fname],
+ capture=True, data=data)
return out.rstrip()
@@ -383,14 +410,30 @@ def wait_for_files(flist, maxwait=60, naplen=.5):
def write_files(datadir, files, dirmode=None):
+
+ def _redact_password(cnt, fname):
+ """Azure provides the UserPassword in plain text. So we redact it"""
+ try:
+ root = ET.fromstring(cnt)
+ for elem in root.iter():
+ if ('UserPassword' in elem.tag and
+ elem.text != DEF_PASSWD_REDACTION):
+ elem.text = DEF_PASSWD_REDACTION
+ return ET.tostring(root)
+ except Exception:
+ LOG.critical("failed to redact userpassword in {}".format(fname))
+ return cnt
+
if not datadir:
return
if not files:
files = {}
util.ensure_dir(datadir, dirmode)
for (name, content) in files.items():
- util.write_file(filename=os.path.join(datadir, name),
- content=content, mode=0600)
+ fname = os.path.join(datadir, name)
+ if 'ovf-env.xml' in name:
+ content = _redact_password(content, fname)
+ util.write_file(filename=fname, content=content, mode=0o600)
def invoke_agent(cmd):
@@ -441,7 +484,8 @@ def load_azure_ovf_pubkeys(sshnode):
for pk_node in pubkeys:
if not pk_node.hasChildNodes():
continue
- cur = {'fingerprint': "", 'path': ""}
+
+ cur = {'fingerprint': "", 'path': "", 'value': ""}
for child in pk_node.childNodes:
if child.nodeType == text_node or not child.localName:
continue
@@ -461,20 +505,6 @@ def load_azure_ovf_pubkeys(sshnode):
return found
-def single_node_at_path(node, pathlist):
- curnode = node
- for tok in pathlist:
- results = find_child(curnode, lambda n: n.localName == tok)
- if len(results) == 0:
- raise ValueError("missing %s token in %s" % (tok, str(pathlist)))
- if len(results) > 1:
- raise ValueError("found %s nodes of type %s looking for %s" %
- (len(results), tok, str(pathlist)))
- curnode = results[0]
-
- return curnode
-
-
def read_azure_ovf(contents):
try:
dom = minidom.parseString(contents)
@@ -482,7 +512,7 @@ def read_azure_ovf(contents):
raise BrokenAzureDataSource("invalid xml: %s" % e)
results = find_child(dom.documentElement,
- lambda n: n.localName == "ProvisioningSection")
+ lambda n: n.localName == "ProvisioningSection")
if len(results) == 0:
raise NonAzureDataSource("No ProvisioningSection")
@@ -492,7 +522,8 @@ def read_azure_ovf(contents):
provSection = results[0]
lpcs_nodes = find_child(provSection,
- lambda n: n.localName == "LinuxProvisioningConfigurationSet")
+ lambda n:
+ n.localName == "LinuxProvisioningConfigurationSet")
if len(results) == 0:
raise NonAzureDataSource("No LinuxProvisioningConfigurationSet")
@@ -559,7 +590,7 @@ def read_azure_ovf(contents):
defuser = {}
if username:
defuser['name'] = username
- if password:
+ if password and DEF_PASSWD_REDACTION != password:
defuser['passwd'] = encrypt_pass(password)
defuser['lock_passwd'] = False
@@ -592,32 +623,13 @@ def load_azure_ds_dir(source_dir):
if not os.path.isfile(ovf_file):
raise NonAzureDataSource("No ovf-env file found")
- with open(ovf_file, "r") as fp:
+ with open(ovf_file, "rb") as fp:
contents = fp.read()
md, ud, cfg = read_azure_ovf(contents)
return (md, ud, cfg, {'ovf-env.xml': contents})
-def iid_from_shared_config(path):
- with open(path, "rb") as fp:
- content = fp.read()
- return iid_from_shared_config_content(content)
-
-
-def iid_from_shared_config_content(content):
- """
- find INSTANCE_ID in:
- <?xml version="1.0" encoding="utf-8"?>
- <SharedConfig version="1.0.0.0" goalStateIncarnation="1">
- <Deployment name="INSTANCE_ID" guid="{...}" incarnation="0">
- <Service name="..." guid="{00000000-0000-0000-0000-000000000000}" />
- """
- dom = minidom.parseString(content)
- depnode = single_node_at_path(dom, ["SharedConfig", "Deployment"])
- return depnode.attributes.get('name').value
-
-
class BrokenAzureDataSource(Exception):
pass
@@ -628,7 +640,7 @@ class NonAzureDataSource(Exception):
# Used to match classes to dependencies
datasources = [
- (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceAzureNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
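
The Azure change adds password redaction before ovf-env.xml is cached to disk. The new _redact_password closure in write_files() can be exercised on its own; a self-contained sketch with an invented, minimal ovf-env-like payload:

    import xml.etree.ElementTree as ET

    DEF_PASSWD_REDACTION = 'REDACTED'

    def redact_password(cnt):
        # mirrors the _redact_password closure added in write_files()
        root = ET.fromstring(cnt)
        for elem in root.iter():
            if ('UserPassword' in elem.tag and
                    elem.text != DEF_PASSWD_REDACTION):
                elem.text = DEF_PASSWD_REDACTION
        return ET.tostring(root)

    # invented minimal payload; a real ovf-env.xml is namespaced and larger
    xml_in = b'<Environment><UserPassword>s3cret</UserPassword></Environment>'
    assert b'REDACTED' in redact_password(xml_in)
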
diff --git a/cloudinit/sources/DataSourceBigstep.py b/cloudinit/sources/DataSourceBigstep.py
new file mode 100644
index 00000000..b5ee4129
--- /dev/null
+++ b/cloudinit/sources/DataSourceBigstep.py
@@ -0,0 +1,57 @@
+#
+# Copyright (C) 2015-2016 Bigstep Cloud Ltd.
+#
+# Author: Alexandru Sirbu <alexandru.sirbu@bigstep.com>
+#
+
+import json
+import errno
+
+from cloudinit import log as logging
+from cloudinit import sources
+from cloudinit import util
+from cloudinit import url_helper
+
+LOG = logging.getLogger(__name__)
+
+
+class DataSourceBigstep(sources.DataSource):
+ def __init__(self, sys_cfg, distro, paths):
+ sources.DataSource.__init__(self, sys_cfg, distro, paths)
+ self.metadata = {}
+ self.vendordata_raw = ""
+ self.userdata_raw = ""
+
+ def get_data(self, apply_filter=False):
+ url = get_url_from_file()
+ if url is None:
+ return False
+ response = url_helper.readurl(url)
+ decoded = json.loads(response.contents)
+ self.metadata = decoded["metadata"]
+ self.vendordata_raw = decoded["vendordata_raw"]
+ self.userdata_raw = decoded["userdata_raw"]
+ return True
+
+
+def get_url_from_file():
+ try:
+ content = util.load_file("/var/lib/cloud/data/seed/bigstep/url")
+ except IOError as e:
+ # If the file doesn't exist, then the server probably isn't a Bigstep
+ # instance; otherwise, another problem exists which needs investigation
+ if e.errno == errno.ENOENT:
+ return None
+ else:
+ raise
+ return content
+
+# Used to match classes to dependencies
+datasources = [
+ (DataSourceBigstep, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
+
+
+# Return a list of data sources that match this set of dependencies
+def get_datasource_list(depends):
+ return sources.list_from_depends(depends, datasources)
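
The new Bigstep datasource reads a URL from /var/lib/cloud/data/seed/bigstep/url and expects the response body to be JSON carrying exactly the three raw fields consumed in get_data(). A sketch of that payload shape (values invented for illustration):

    import json

    body = json.dumps({
        "metadata": {"instance-id": "iid-bigstep-example"},
        "vendordata_raw": "",
        "userdata_raw": "#cloud-config\n",
    })
    decoded = json.loads(body)
    assert set(decoded) == {"metadata", "vendordata_raw", "userdata_raw"}
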
diff --git a/cloudinit/sources/DataSourceCloudSigma.py b/cloudinit/sources/DataSourceCloudSigma.py
index 707cd0ce..f8f94759 100644
--- a/cloudinit/sources/DataSourceCloudSigma.py
+++ b/cloudinit/sources/DataSourceCloudSigma.py
@@ -44,27 +44,25 @@ class DataSourceCloudSigma(sources.DataSource):
def is_running_in_cloudsigma(self):
"""
- Uses dmidecode to detect if this instance of cloud-init is running
+ Uses dmi data to detect if this instance of cloud-init is running
in the CloudSigma's infrastructure.
"""
uname_arch = os.uname()[4]
if uname_arch.startswith("arm") or uname_arch == "aarch64":
- # Disabling because dmidecode in CMD_DMI_SYSTEM crashes kvm process
+            # Disabling because dmi data is not available on ARM processors
LOG.debug("Disabling CloudSigma datasource on arm (LP: #1243287)")
return False
- dmidecode_path = util.which('dmidecode')
- if not dmidecode_path:
+ LOG.debug("determining hypervisor product name via dmi data")
+ sys_product_name = util.read_dmi_data("system-product-name")
+ if not sys_product_name:
+ LOG.warn("failed to get hypervisor product name via dmi data")
return False
+ else:
+ LOG.debug("detected hypervisor as %s", sys_product_name)
+ return 'cloudsigma' in sys_product_name.lower()
- LOG.debug("Determining hypervisor product name via dmidecode")
- try:
- cmd = [dmidecode_path, "--string", "system-product-name"]
- system_product_name, _ = util.subp(cmd)
- return 'cloudsigma' in system_product_name.lower()
- except:
- LOG.warn("Failed to get hypervisor product name via dmidecode")
-
+ LOG.warn("failed to query dmi data for system product name")
return False
def get_data(self):
diff --git a/cloudinit/sources/DataSourceCloudStack.py b/cloudinit/sources/DataSourceCloudStack.py
index 1bbeca59..64595020 100644
--- a/cloudinit/sources/DataSourceCloudStack.py
+++ b/cloudinit/sources/DataSourceCloudStack.py
@@ -26,18 +26,54 @@
import os
import time
+from socket import inet_ntoa
+from struct import pack
from cloudinit import ec2_utils as ec2
from cloudinit import log as logging
-from cloudinit import sources
from cloudinit import url_helper as uhelp
-from cloudinit import util
-from socket import inet_ntoa
-from struct import pack
+from cloudinit import sources, util
LOG = logging.getLogger(__name__)
+class CloudStackPasswordServerClient(object):
+ """
+ Implements password fetching from the CloudStack password server.
+
+ http://cloudstack-administration.readthedocs.org/
+ en/latest/templates.html#adding-password-management-to-your-templates
+ has documentation about the system. This implementation is following that
+ found at
+ https://github.com/shankerbalan/cloudstack-scripts/
+ blob/master/cloud-set-guest-password-debian
+ """
+
+ def __init__(self, virtual_router_address):
+ self.virtual_router_address = virtual_router_address
+
+ def _do_request(self, domu_request):
+        # The password server was, in the past, a broken HTTP server, but is now
+ # fixed. wget handles this seamlessly, so it's easier to shell out to
+ # that rather than write our own handling code.
+ output, _ = util.subp([
+ 'wget', '--quiet', '--tries', '3', '--timeout', '20',
+ '--output-document', '-', '--header',
+ 'DomU_Request: {0}'.format(domu_request),
+ '{0}:8080'.format(self.virtual_router_address)
+ ])
+ return output.strip()
+
+ def get_password(self):
+ password = self._do_request('send_my_password')
+ if password in ['', 'saved_password']:
+ return None
+ if password == 'bad_request':
+ raise RuntimeError('Error when attempting to fetch root password.')
+ self._do_request('saved_password')
+ return password
+
+
class DataSourceCloudStack(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -45,10 +81,11 @@ class DataSourceCloudStack(sources.DataSource):
# Cloudstack has its metadata/userdata URLs located at
# http://<virtual-router-ip>/latest/
self.api_ver = 'latest'
- vr_addr = get_vr_address()
- if not vr_addr:
+ self.vr_addr = get_vr_address()
+ if not self.vr_addr:
raise RuntimeError("No virtual router found!")
- self.metadata_address = "http://%s/" % (vr_addr)
+ self.metadata_address = "http://%s/" % (self.vr_addr,)
+ self.cfg = {}
def _get_url_settings(self):
mcfg = self.ds_cfg
@@ -82,17 +119,20 @@ class DataSourceCloudStack(sources.DataSource):
'latest/meta-data/instance-id')]
start_time = time.time()
url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
+ timeout=timeout, status_cb=LOG.warn)
if url:
LOG.debug("Using metadata source: '%s'", url)
else:
LOG.critical(("Giving up on waiting for the metadata from %s"
" after %s seconds"),
- urls, int(time.time() - start_time))
+ urls, int(time.time() - start_time))
return bool(url)
+ def get_config_obj(self):
+ return self.cfg
+
def get_data(self):
seed_ret = {}
if util.read_optional_seed(seed_ret, base=(self.seed_dir + "/")):
@@ -104,12 +144,28 @@ class DataSourceCloudStack(sources.DataSource):
if not self.wait_for_metadata_service():
return False
start_time = time.time()
- self.userdata_raw = ec2.get_instance_userdata(self.api_ver,
- self.metadata_address)
+ self.userdata_raw = ec2.get_instance_userdata(
+ self.api_ver, self.metadata_address)
self.metadata = ec2.get_instance_metadata(self.api_ver,
self.metadata_address)
LOG.debug("Crawl of metadata service took %s seconds",
int(time.time() - start_time))
+ password_client = CloudStackPasswordServerClient(self.vr_addr)
+ try:
+ set_password = password_client.get_password()
+ except Exception:
+ util.logexc(LOG,
+ 'Failed to fetch password from virtual router %s',
+ self.vr_addr)
+ else:
+ if set_password:
+ self.cfg = {
+ 'ssh_pwauth': True,
+ 'password': set_password,
+ 'chpasswd': {
+ 'expire': False,
+ },
+ }
return True
except Exception:
util.logexc(LOG, 'Failed fetching from metadata service %s',
@@ -192,7 +248,7 @@ def get_vr_address():
# Used to match classes to dependencies
datasources = [
- (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceCloudStack, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
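
Use of the new password-server client is two requests: fetch the password, then tell the router to mark it saved (get_password() does both). A hedged usage sketch; the router address here is invented, and since _do_request shells out to wget against <router>:8080 this only works on an actual CloudStack guest:

    client = CloudStackPasswordServerClient('10.1.1.1')  # invented address
    try:
        password = client.get_password()   # None if unset or already saved
    except RuntimeError:
        password = None                    # server answered 'bad_request'
    if password:
        cfg = {'ssh_pwauth': True, 'password': password,
               'chpasswd': {'expire': False}}
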
diff --git a/cloudinit/sources/DataSourceConfigDrive.py b/cloudinit/sources/DataSourceConfigDrive.py
index 15244a0d..3fa62ef3 100644
--- a/cloudinit/sources/DataSourceConfigDrive.py
+++ b/cloudinit/sources/DataSourceConfigDrive.py
@@ -18,6 +18,7 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+import copy
import os
from cloudinit import log as logging
@@ -39,7 +40,7 @@ FS_TYPES = ('vfat', 'iso9660')
LABEL_TYPES = ('config-2',)
POSSIBLE_MOUNTS = ('sr', 'cd')
OPTICAL_DEVICES = tuple(('/dev/%s%s' % (z, i) for z in POSSIBLE_MOUNTS
- for i in range(0, 2)))
+ for i in range(0, 2)))
class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
@@ -50,6 +51,8 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
self.seed_dir = os.path.join(paths.seed_dir, 'config_drive')
self.version = None
self.ec2_metadata = None
+ self._network_config = None
+ self.network_json = None
self.files = {}
def __str__(self):
@@ -144,8 +147,25 @@ class DataSourceConfigDrive(openstack.SourceMixin, sources.DataSource):
LOG.warn("Invalid content in vendor-data: %s", e)
self.vendordata_raw = None
+ try:
+ self.network_json = results.get('networkdata')
+ except ValueError as e:
+ LOG.warn("Invalid content in network-data: %s", e)
+ self.network_json = None
+
return True
+ def check_instance_id(self):
+ # quickly (local check only) if self.instance_id is still valid
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
+ @property
+ def network_config(self):
+ if self._network_config is None:
+ if self.network_json is not None:
+ self._network_config = convert_network_data(self.network_json)
+ return self._network_config
+
class DataSourceConfigDriveNet(DataSourceConfigDrive):
def __init__(self, sys_cfg, distro, paths):
@@ -216,11 +236,11 @@ def on_first_boot(data, distro=None):
files = data.get('files', {})
if files:
LOG.debug("Writing %s injected files", len(files))
- for (filename, content) in files.iteritems():
+ for (filename, content) in files.items():
if not filename.startswith(os.sep):
filename = os.sep + filename
try:
- util.write_file(filename, content, mode=0660)
+ util.write_file(filename, content, mode=0o660)
except IOError:
util.logexc(LOG, "Failed writing file: %s", filename)
@@ -283,3 +303,122 @@ datasources = [
# Return a list of data sources that match this set of dependencies
def get_datasource_list(depends):
return sources.list_from_depends(depends, datasources)
+
+
+# Convert OpenStack ConfigDrive NetworkData json to network_config yaml
+def convert_network_data(network_json=None):
+ """Return a dictionary of network_config by parsing provided
+ OpenStack ConfigDrive NetworkData json format
+
+ OpenStack network_data.json provides a 3 element dictionary
+ - "links" (links are network devices, physical or virtual)
+ - "networks" (networks are ip network configurations for one or more
+ links)
+ - services (non-ip services, like dns)
+
+ networks and links are combined via network items referencing specific
+    links via a 'link_id' which maps to a link's 'id' field.
+
+ To convert this format to network_config yaml, we first iterate over the
+ links and then walk the network list to determine if any of the networks
+ utilize the current link; if so we generate a subnet entry for the device
+
+ We also need to map network_data.json fields to network_config fields. For
+ example, the network_data links 'id' field is equivalent to network_config
+ 'name' field for devices. We apply more of this mapping to the various
+ link types that we encounter.
+
+ There are additional fields that are populated in the network_data.json
+ from OpenStack that are not relevant to network_config yaml, so we
+ enumerate a dictionary of valid keys for network_yaml and apply filtering
+    to drop these superfluous keys from the network_config yaml.
+ """
+ if network_json is None:
+ return None
+
+ # dict of network_config key for filtering network_json
+ valid_keys = {
+ 'physical': [
+ 'name',
+ 'type',
+ 'mac_address',
+ 'subnets',
+ 'params',
+ ],
+ 'subnet': [
+ 'type',
+ 'address',
+ 'netmask',
+ 'broadcast',
+ 'metric',
+ 'gateway',
+ 'pointopoint',
+ 'mtu',
+ 'scope',
+ 'dns_nameservers',
+ 'dns_search',
+ 'routes',
+ ],
+ }
+
+ links = network_json.get('links', [])
+ networks = network_json.get('networks', [])
+ services = network_json.get('services', [])
+
+ config = []
+ for link in links:
+ subnets = []
+ cfg = {k: v for k, v in link.items()
+ if k in valid_keys['physical']}
+ cfg.update({'name': link['id']})
+ for network in [net for net in networks
+ if net['link'] == link['id']]:
+ subnet = {k: v for k, v in network.items()
+ if k in valid_keys['subnet']}
+ if 'dhcp' in network['type']:
+ t = 'dhcp6' if network['type'].startswith('ipv6') else 'dhcp4'
+ subnet.update({
+ 'type': t,
+ })
+ else:
+ subnet.update({
+ 'type': 'static',
+ 'address': network.get('ip_address'),
+ })
+ subnets.append(subnet)
+ cfg.update({'subnets': subnets})
+ if link['type'] in ['ethernet', 'vif', 'ovs', 'phy']:
+ cfg.update({
+ 'type': 'physical',
+ 'mac_address': link['ethernet_mac_address']})
+ elif link['type'] in ['bond']:
+ params = {}
+ for k, v in link.items():
+ if k == 'bond_links':
+ continue
+ elif k.startswith('bond'):
+ params.update({k: v})
+ cfg.update({
+ 'bond_interfaces': copy.deepcopy(link['bond_links']),
+ 'params': params,
+ })
+ elif link['type'] in ['vlan']:
+ cfg.update({
+ 'name': "%s.%s" % (link['vlan_link'],
+ link['vlan_id']),
+ 'vlan_link': link['vlan_link'],
+ 'vlan_id': link['vlan_id'],
+ 'mac_address': link['vlan_mac_address'],
+ })
+ else:
+ raise ValueError(
+ 'Unknown network_data link type: %s' % link['type'])
+
+ config.append(cfg)
+
+ for service in services:
+ cfg = service
+ cfg.update({'type': 'nameserver'})
+ config.append(cfg)
+
+ return {'version': 1, 'config': config}
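
A worked example of convert_network_data(), feeding it an invented single-NIC network_data.json:

    network_json = {
        'links': [{'id': 'tap-0', 'type': 'ethernet',
                   'ethernet_mac_address': 'fa:16:3e:00:11:22'}],
        'networks': [{'link': 'tap-0', 'type': 'ipv4_dhcp'}],
        'services': [{'type': 'dns', 'address': '8.8.8.8'}],
    }
    print(convert_network_data(network_json))
    # {'version': 1, 'config': [
    #     {'type': 'physical', 'name': 'tap-0',
    #      'mac_address': 'fa:16:3e:00:11:22',
    #      'subnets': [{'type': 'dhcp4'}]},
    #     {'type': 'nameserver', 'address': '8.8.8.8'}]}
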
diff --git a/cloudinit/sources/DataSourceDigitalOcean.py b/cloudinit/sources/DataSourceDigitalOcean.py
index 8f27ee89..12e863d2 100644
--- a/cloudinit/sources/DataSourceDigitalOcean.py
+++ b/cloudinit/sources/DataSourceDigitalOcean.py
@@ -18,7 +18,7 @@ from cloudinit import log as logging
from cloudinit import util
from cloudinit import sources
from cloudinit import ec2_utils
-from types import StringType
+
import functools
@@ -54,9 +54,13 @@ class DataSourceDigitalOcean(sources.DataSource):
def get_data(self):
caller = functools.partial(util.read_file_or_url,
timeout=self.timeout, retries=self.retries)
- md = ec2_utils.MetadataMaterializer(str(caller(self.metadata_address)),
+
+ def mcaller(url):
+ return caller(url).contents
+
+ md = ec2_utils.MetadataMaterializer(mcaller(self.metadata_address),
base_url=self.metadata_address,
- caller=caller)
+ caller=mcaller)
self.metadata = md.materialize()
@@ -72,10 +76,11 @@ class DataSourceDigitalOcean(sources.DataSource):
return "\n".join(self.metadata['vendor-data'])
def get_public_ssh_keys(self):
- if type(self.metadata['public-keys']) is StringType:
- return [self.metadata['public-keys']]
+ public_keys = self.metadata['public-keys']
+ if isinstance(public_keys, list):
+ return public_keys
else:
- return self.metadata['public-keys']
+ return [public_keys]
@property
def availability_zone(self):
@@ -84,7 +89,7 @@ class DataSourceDigitalOcean(sources.DataSource):
def get_instance_id(self):
return self.metadata['id']
- def get_hostname(self, fqdn=False):
+ def get_hostname(self, fqdn=False, resolve_ip=False):
return self.metadata['hostname']
def get_package_mirror_info(self):
@@ -96,8 +101,8 @@ class DataSourceDigitalOcean(sources.DataSource):
# Used to match classes to dependencies
datasources = [
- (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
- ]
+ (DataSourceDigitalOcean, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+]
# Return a list of data sources that match this set of dependencies
diff --git a/cloudinit/sources/DataSourceEc2.py b/cloudinit/sources/DataSourceEc2.py
index 1b20ecf3..3ef2c6af 100644
--- a/cloudinit/sources/DataSourceEc2.py
+++ b/cloudinit/sources/DataSourceEc2.py
@@ -61,12 +61,12 @@ class DataSourceEc2(sources.DataSource):
if not self.wait_for_metadata_service():
return False
start_time = time.time()
- self.userdata_raw = ec2.get_instance_userdata(self.api_ver,
- self.metadata_address)
+ self.userdata_raw = \
+ ec2.get_instance_userdata(self.api_ver, self.metadata_address)
self.metadata = ec2.get_instance_metadata(self.api_ver,
self.metadata_address)
LOG.debug("Crawl of metadata service took %s seconds",
- int(time.time() - start_time))
+ int(time.time() - start_time))
return True
except Exception:
util.logexc(LOG, "Failed reading from metadata address %s",
@@ -132,13 +132,13 @@ class DataSourceEc2(sources.DataSource):
start_time = time.time()
url = uhelp.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout, status_cb=LOG.warn)
+ timeout=timeout, status_cb=LOG.warn)
if url:
LOG.debug("Using metadata source: '%s'", url2base[url])
else:
LOG.critical("Giving up on md from %s after %s seconds",
- urls, int(time.time() - start_time))
+ urls, int(time.time() - start_time))
self.metadata_address = url2base.get(url)
return bool(url)
@@ -156,8 +156,8 @@ class DataSourceEc2(sources.DataSource):
# 'ephemeral0': '/dev/sdb',
# 'root': '/dev/sda1'}
found = None
- bdm_items = self.metadata['block-device-mapping'].iteritems()
- for (entname, device) in bdm_items:
+ bdm = self.metadata['block-device-mapping']
+ for (entname, device) in bdm.items():
if entname == name:
found = device
break
@@ -197,9 +197,16 @@ class DataSourceEc2(sources.DataSource):
except KeyError:
return None
+ @property
+ def region(self):
+ az = self.availability_zone
+ if az is not None:
+ return az[:-1]
+ return None
+
# Used to match classes to dependencies
datasources = [
- (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceEc2, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
diff --git a/cloudinit/sources/DataSourceGCE.py b/cloudinit/sources/DataSourceGCE.py
index 2cf8fdcd..7e7fc033 100644
--- a/cloudinit/sources/DataSourceGCE.py
+++ b/cloudinit/sources/DataSourceGCE.py
@@ -30,6 +30,31 @@ BUILTIN_DS_CONFIG = {
REQUIRED_FIELDS = ('instance-id', 'availability-zone', 'local-hostname')
+class GoogleMetadataFetcher(object):
+ headers = {'X-Google-Metadata-Request': True}
+
+ def __init__(self, metadata_address):
+ self.metadata_address = metadata_address
+
+ def get_value(self, path, is_text):
+ value = None
+ try:
+ resp = url_helper.readurl(url=self.metadata_address + path,
+ headers=self.headers)
+ except url_helper.UrlError as exc:
+ msg = "url %s raised exception %s"
+ LOG.debug(msg, path, exc)
+ else:
+ if resp.code == 200:
+ if is_text:
+ value = util.decode_binary(resp.contents)
+ else:
+ value = resp.contents
+ else:
+ LOG.debug("url %s returned code %s", path, resp.code)
+ return value
+
+
class DataSourceGCE(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
@@ -50,18 +75,16 @@ class DataSourceGCE(sources.DataSource):
return public_key
def get_data(self):
- # GCE metadata server requires a custom header since v1
- headers = {'X-Google-Metadata-Request': True}
-
- # url_map: (our-key, path, required)
+ # url_map: (our-key, path, required, is_text)
url_map = [
- ('instance-id', 'instance/id', True),
- ('availability-zone', 'instance/zone', True),
- ('local-hostname', 'instance/hostname', True),
- ('public-keys', 'project/attributes/sshKeys', False),
- ('user-data', 'instance/attributes/user-data', False),
- ('user-data-encoding', 'instance/attributes/user-data-encoding',
- False),
+ ('instance-id', ('instance/id',), True, True),
+ ('availability-zone', ('instance/zone',), True, True),
+ ('local-hostname', ('instance/hostname',), True, True),
+ ('public-keys', ('project/attributes/sshKeys',
+ 'instance/attributes/sshKeys'), False, True),
+ ('user-data', ('instance/attributes/user-data',), False, False),
+ ('user-data-encoding', ('instance/attributes/user-data-encoding',),
+ False, True),
]
# if we cannot resolve the metadata server, then no point in trying
@@ -69,42 +92,34 @@ class DataSourceGCE(sources.DataSource):
LOG.debug("%s is not resolvable", self.metadata_address)
return False
+ metadata_fetcher = GoogleMetadataFetcher(self.metadata_address)
# iterate over url_map keys to get metadata items
- found = False
- for (mkey, path, required) in url_map:
- try:
- resp = url_helper.readurl(url=self.metadata_address + path,
- headers=headers)
- if resp.code == 200:
- found = True
- self.metadata[mkey] = resp.contents
+ running_on_gce = False
+ for (mkey, paths, required, is_text) in url_map:
+ value = None
+ for path in paths:
+ new_value = metadata_fetcher.get_value(path, is_text)
+ if new_value is not None:
+ value = new_value
+ if value:
+ running_on_gce = True
+ if required and value is None:
+ msg = "required key %s returned nothing. not GCE"
+ if not running_on_gce:
+ LOG.debug(msg, mkey)
else:
- if required:
- msg = "required url %s returned code %s. not GCE"
- if not found:
- LOG.debug(msg, path, resp.code)
- else:
- LOG.warn(msg, path, resp.code)
- return False
- else:
- self.metadata[mkey] = None
- except url_helper.UrlError as e:
- if required:
- msg = "required url %s raised exception %s. not GCE"
- if not found:
- LOG.debug(msg, path, e)
- else:
- LOG.warn(msg, path, e)
- return False
- msg = "Failed to get %s metadata item: %s."
- LOG.debug(msg, path, e)
-
- self.metadata[mkey] = None
+ LOG.warn(msg, mkey)
+ return False
+ self.metadata[mkey] = value
if self.metadata['public-keys']:
lines = self.metadata['public-keys'].splitlines()
self.metadata['public-keys'] = [self._trim_key(k) for k in lines]
+ if self.metadata['availability-zone']:
+ self.metadata['availability-zone'] = self.metadata[
+ 'availability-zone'].split('/')[-1]
+
encoding = self.metadata.get('user-data-encoding')
if encoding:
if encoding == 'base64':
@@ -113,7 +128,7 @@ class DataSourceGCE(sources.DataSource):
else:
LOG.warn('unknown user-data-encoding: %s, ignoring', encoding)
- return found
+ return running_on_gce
@property
def launch_index(self):
@@ -126,7 +141,7 @@ class DataSourceGCE(sources.DataSource):
def get_public_ssh_keys(self):
return self.metadata['public-keys']
- def get_hostname(self, fqdn=False, _resolve_ip=False):
+ def get_hostname(self, fqdn=False, resolve_ip=False):
        # GCE has long FQDNs and has asked for short hostnames
return self.metadata['local-hostname'].split('.')[0]
@@ -137,6 +152,10 @@ class DataSourceGCE(sources.DataSource):
def availability_zone(self):
return self.metadata['availability-zone']
+ @property
+ def region(self):
+ return self.availability_zone.rsplit('-', 1)[0]
+
# Used to match classes to dependencies
datasources = [
(DataSourceGCE, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
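
Both EC2 and GCE gain a region property in this merge, but the string surgery differs because the zone formats differ. A quick sketch with representative zone names:

    # EC2: the region is the availability zone minus its trailing letter
    ec2_az = 'us-east-1a'
    assert ec2_az[:-1] == 'us-east-1'

    # GCE: the region is the zone minus its '-<letter>' suffix
    gce_zone = 'us-central1-a'
    assert gce_zone.rsplit('-', 1)[0] == 'us-central1'
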
diff --git a/cloudinit/sources/DataSourceMAAS.py b/cloudinit/sources/DataSourceMAAS.py
index dfe90bc6..d828f078 100644
--- a/cloudinit/sources/DataSourceMAAS.py
+++ b/cloudinit/sources/DataSourceMAAS.py
@@ -18,12 +18,11 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-from email.utils import parsedate
+from __future__ import print_function
+
import errno
-import oauth.oauth as oauth
import os
import time
-import urllib2
from cloudinit import log as logging
from cloudinit import sources
@@ -33,6 +32,8 @@ from cloudinit import util
LOG = logging.getLogger(__name__)
MD_VERSION = "2012-03-01"
+BINARY_FIELDS = ('user-data',)
+
class DataSourceMAAS(sources.DataSource):
"""
@@ -47,7 +48,20 @@ class DataSourceMAAS(sources.DataSource):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.base_url = None
self.seed_dir = os.path.join(paths.seed_dir, 'maas')
- self.oauth_clockskew = None
+ self.oauth_helper = self._get_helper()
+
+ def _get_helper(self):
+ mcfg = self.ds_cfg
+ # If we are missing token_key, token_secret or consumer_key
+ # then just do non-authed requests
+ for required in ('token_key', 'token_secret', 'consumer_key'):
+ if required not in mcfg:
+ return url_helper.OauthUrlHelper()
+
+ return url_helper.OauthUrlHelper(
+ consumer_key=mcfg['consumer_key'], token_key=mcfg['token_key'],
+ token_secret=mcfg['token_secret'],
+ consumer_secret=mcfg.get('consumer_secret'))
def __str__(self):
root = sources.DataSource.__str__(self)
@@ -74,14 +88,18 @@ class DataSourceMAAS(sources.DataSource):
return False
try:
+            # doing this here actually has a side effect of
+            # getting the oauth time-fix in place: nowhere else would
+ # retry by default, so even if we could fix the timestamp
+ # we would not.
if not self.wait_for_metadata_service(url):
return False
self.base_url = url
- (userdata, metadata) = read_maas_seed_url(self.base_url,
- self._md_headers,
- paths=self.paths)
+ (userdata, metadata) = read_maas_seed_url(
+ self.base_url, read_file_or_url=self.oauth_helper.readurl,
+ paths=self.paths, retries=1)
self.userdata_raw = userdata
self.metadata = metadata
return True
@@ -89,31 +107,8 @@ class DataSourceMAAS(sources.DataSource):
util.logexc(LOG, "Failed fetching metadata from url %s", url)
return False
- def _md_headers(self, url):
- mcfg = self.ds_cfg
-
- # If we are missing token_key, token_secret or consumer_key
- # then just do non-authed requests
- for required in ('token_key', 'token_secret', 'consumer_key'):
- if required not in mcfg:
- return {}
-
- consumer_secret = mcfg.get('consumer_secret', "")
-
- timestamp = None
- if self.oauth_clockskew:
- timestamp = int(time.time()) + self.oauth_clockskew
-
- return oauth_headers(url=url,
- consumer_key=mcfg['consumer_key'],
- token_key=mcfg['token_key'],
- token_secret=mcfg['token_secret'],
- consumer_secret=consumer_secret,
- timestamp=timestamp)
-
def wait_for_metadata_service(self, url):
mcfg = self.ds_cfg
-
max_wait = 120
try:
max_wait = int(mcfg.get("max_wait", max_wait))
@@ -133,10 +128,8 @@ class DataSourceMAAS(sources.DataSource):
starttime = time.time()
check_url = "%s/%s/meta-data/instance-id" % (url, MD_VERSION)
urls = [check_url]
- url = url_helper.wait_for_url(urls=urls, max_wait=max_wait,
- timeout=timeout,
- exception_cb=self._except_cb,
- headers_cb=self._md_headers)
+ url = self.oauth_helper.wait_for_url(
+ urls=urls, max_wait=max_wait, timeout=timeout)
if url:
LOG.debug("Using metadata source: '%s'", url)
@@ -146,26 +139,6 @@ class DataSourceMAAS(sources.DataSource):
return bool(url)
- def _except_cb(self, msg, exception):
- if not (isinstance(exception, url_helper.UrlError) and
- (exception.code == 403 or exception.code == 401)):
- return
-
- if 'date' not in exception.headers:
- LOG.warn("Missing header 'date' in %s response", exception.code)
- return
-
- date = exception.headers['date']
- try:
- ret_time = time.mktime(parsedate(date))
- except Exception as e:
- LOG.warn("Failed to convert datetime '%s': %s", date, e)
- return
-
- self.oauth_clockskew = int(ret_time - time.time())
- LOG.warn("Setting oauth clockskew to %d", self.oauth_clockskew)
- return
-
def read_maas_seed_dir(seed_d):
"""
@@ -182,7 +155,8 @@ def read_maas_seed_dir(seed_d):
md = {}
for fname in files:
try:
- md[fname] = util.load_file(os.path.join(seed_d, fname))
+ md[fname] = util.load_file(os.path.join(seed_d, fname),
+ decode=fname not in BINARY_FIELDS)
except IOError as e:
if e.errno != errno.ENOENT:
raise
@@ -190,12 +164,12 @@ def read_maas_seed_dir(seed_d):
return check_seed_contents(md, seed_d)
-def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
- version=MD_VERSION, paths=None):
+def read_maas_seed_url(seed_url, read_file_or_url=None, timeout=None,
+ version=MD_VERSION, paths=None, retries=None):
"""
Read the maas datasource at seed_url.
- - header_cb is a method that should return a headers dictionary for
- a given url
+ read_file_or_url is a method that should provide an interface
+ like util.read_file_or_url
Expected format of seed_url is are the following files:
* <seed_url>/<version>/meta-data/instance-id
@@ -215,27 +189,27 @@ def read_maas_seed_url(seed_url, header_cb=None, timeout=None,
'public-keys': "%s/%s" % (base_url, 'meta-data/public-keys'),
'user-data': "%s/%s" % (base_url, 'user-data'),
}
+
+ if read_file_or_url is None:
+ read_file_or_url = util.read_file_or_url
+
md = {}
for name in file_order:
url = files.get(name)
- if not header_cb:
- def _cb(url):
- return {}
- header_cb = _cb
-
if name == 'user-data':
- retries = 0
+ item_retries = 0
else:
- retries = None
+ item_retries = retries
try:
ssl_details = util.fetch_ssl_details(paths)
- resp = util.read_file_or_url(url, retries=retries,
- headers_cb=header_cb,
- timeout=timeout,
- ssl_details=ssl_details)
+ resp = read_file_or_url(url, retries=item_retries,
+ timeout=timeout, ssl_details=ssl_details)
if resp.ok():
- md[name] = str(resp)
+ if name in BINARY_FIELDS:
+ md[name] = resp.contents
+ else:
+ md[name] = util.decode_binary(resp.contents)
else:
LOG.warn(("Fetching from %s resulted in"
" an invalid http code %s"), url, resp.code)
@@ -260,9 +234,9 @@ def check_seed_contents(content, seed):
if len(missing):
raise MAASSeedDirMalformed("%s: missing files %s" % (seed, missing))
- userdata = content.get('user-data', "")
+ userdata = content.get('user-data', b"")
md = {}
- for (key, val) in content.iteritems():
+ for (key, val) in content.items():
if key == 'user-data':
continue
md[key] = val
@@ -270,29 +244,6 @@ def check_seed_contents(content, seed):
return (userdata, md)
-def oauth_headers(url, consumer_key, token_key, token_secret, consumer_secret,
- timestamp=None):
- consumer = oauth.OAuthConsumer(consumer_key, consumer_secret)
- token = oauth.OAuthToken(token_key, token_secret)
-
- if timestamp is None:
- ts = int(time.time())
- else:
- ts = timestamp
-
- params = {
- 'oauth_version': "1.0",
- 'oauth_nonce': oauth.generate_nonce(),
- 'oauth_timestamp': ts,
- 'oauth_token': token.key,
- 'oauth_consumer_key': consumer.key,
- }
- req = oauth.OAuthRequest(http_url=url, parameters=params)
- req.sign_request(oauth.OAuthSignatureMethod_PLAINTEXT(),
- consumer, token)
- return req.to_header()
-
-
class MAASSeedDirNone(Exception):
pass
@@ -303,7 +254,7 @@ class MAASSeedDirMalformed(Exception):
# Used to match classes to dependencies
datasources = [
- (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceMAAS, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
@@ -324,17 +275,18 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Interact with MAAS DS')
parser.add_argument("--config", metavar="file",
- help="specify DS config file", default=None)
+ help="specify DS config file", default=None)
parser.add_argument("--ckey", metavar="key",
- help="the consumer key to auth with", default=None)
+ help="the consumer key to auth with", default=None)
parser.add_argument("--tkey", metavar="key",
- help="the token key to auth with", default=None)
+ help="the token key to auth with", default=None)
parser.add_argument("--csec", metavar="secret",
- help="the consumer secret (likely '')", default="")
+ help="the consumer secret (likely '')", default="")
parser.add_argument("--tsec", metavar="secret",
- help="the token secret to auth with", default=None)
+ help="the token secret to auth with", default=None)
parser.add_argument("--apiver", metavar="version",
- help="the apiver to use ("" can be used)", default=MD_VERSION)
+ help="the apiver to use ("" can be used)",
+ default=MD_VERSION)
subcmds = parser.add_subparsers(title="subcommands", dest="subcmd")
subcmds.add_parser('crawl', help="crawl the datasource")
@@ -346,7 +298,7 @@ if __name__ == "__main__":
args = parser.parse_args()
creds = {'consumer_key': args.ckey, 'token_key': args.tkey,
- 'token_secret': args.tsec, 'consumer_secret': args.csec}
+ 'token_secret': args.tsec, 'consumer_secret': args.csec}
if args.config:
cfg = util.read_conf(args.config)
@@ -356,47 +308,46 @@ if __name__ == "__main__":
if key in cfg and creds[key] is None:
creds[key] = cfg[key]
- def geturl(url, headers_cb):
- req = urllib2.Request(url, data=None, headers=headers_cb(url))
- return (urllib2.urlopen(req).read())
+ oauth_helper = url_helper.OauthUrlHelper(**creds)
+
+ def geturl(url):
+ # the retry is to ensure that oauth timestamp gets fixed
+ return oauth_helper.readurl(url, retries=1).contents
- def printurl(url, headers_cb):
- print "== %s ==\n%s\n" % (url, geturl(url, headers_cb))
+ def printurl(url):
+ print("== %s ==\n%s\n" % (url, geturl(url).decode()))
- def crawl(url, headers_cb=None):
+ def crawl(url):
if url.endswith("/"):
- for line in geturl(url, headers_cb).splitlines():
+ for line in geturl(url).decode().splitlines():
if line.endswith("/"):
- crawl("%s%s" % (url, line), headers_cb)
+ crawl("%s%s" % (url, line))
+ elif line == "meta-data":
+ # meta-data is a dir, it *should* end in a /
+ crawl("%s%s" % (url, "meta-data/"))
else:
- printurl("%s%s" % (url, line), headers_cb)
+ printurl("%s%s" % (url, line))
else:
- printurl(url, headers_cb)
-
- def my_headers(url):
- headers = {}
- if creds.get('consumer_key', None) is not None:
- headers = oauth_headers(url, **creds)
- return headers
+ printurl(url)
if args.subcmd == "check-seed":
- if args.url.startswith("http"):
- (userdata, metadata) = read_maas_seed_url(args.url,
- header_cb=my_headers,
- version=args.apiver)
- else:
- (userdata, metadata) = read_maas_seed_url(args.url)
- print "=== userdata ==="
- print userdata
- print "=== metadata ==="
+ readurl = oauth_helper.readurl
+ if args.url[0] == "/" or args.url.startswith("file://"):
+ readurl = None
+ (userdata, metadata) = read_maas_seed_url(
+ args.url, version=args.apiver, read_file_or_url=readurl,
+ retries=2)
+ print("=== userdata ===")
+ print(userdata.decode())
+ print("=== metadata ===")
pprint.pprint(metadata)
elif args.subcmd == "get":
- printurl(args.url, my_headers)
+ printurl(args.url)
elif args.subcmd == "crawl":
if not args.url.endswith("/"):
args.url = "%s/" % args.url
- crawl(args.url, my_headers)
+ crawl(args.url)
main()
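
The net effect of the MAAS change: OAuth signing, the 401/403 clock-skew fix, and retries all move into url_helper.OauthUrlHelper. A hedged usage sketch following the __main__ code above (credentials and URL are invented):

    from cloudinit import url_helper

    creds = {'consumer_key': 'ck', 'token_key': 'tk',
             'token_secret': 'ts', 'consumer_secret': ''}
    oauth_helper = url_helper.OauthUrlHelper(**creds)
    # retries=1 gives the helper one chance to correct the oauth
    # timestamp from a 401/403 response's 'date' header, then retry
    url = ('http://maas.example/MAAS/metadata/'
           '2012-03-01/meta-data/instance-id')
    contents = oauth_helper.readurl(url, retries=1).contents
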
diff --git a/cloudinit/sources/DataSourceNoCloud.py b/cloudinit/sources/DataSourceNoCloud.py
index c26a645c..c2fba4d2 100644
--- a/cloudinit/sources/DataSourceNoCloud.py
+++ b/cloudinit/sources/DataSourceNoCloud.py
@@ -36,7 +36,9 @@ class DataSourceNoCloud(sources.DataSource):
self.dsmode = 'local'
self.seed = None
self.cmdline_id = "ds=nocloud"
- self.seed_dir = os.path.join(paths.seed_dir, 'nocloud')
+ self.seed_dirs = [os.path.join(paths.seed_dir, 'nocloud'),
+ os.path.join(paths.seed_dir, 'nocloud-net')]
+ self.seed_dir = None
self.supported_seed_starts = ("/", "file://")
def __str__(self):
@@ -50,31 +52,32 @@ class DataSourceNoCloud(sources.DataSource):
}
found = []
- mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': ""}
+ mydata = {'meta-data': {}, 'user-data': "", 'vendor-data': "",
+ 'network-config': {}}
try:
# Parse the kernel command line, getting data passed in
md = {}
if parse_cmdline_data(self.cmdline_id, md):
found.append("cmdline")
- mydata['meta-data'].update(md)
+ mydata = _merge_new_seed(mydata, {'meta-data': md})
except:
util.logexc(LOG, "Unable to parse command line data")
return False
# Check to see if the seed dir has data.
pp2d_kwargs = {'required': ['user-data', 'meta-data'],
- 'optional': ['vendor-data']}
-
- try:
- seeded = util.pathprefix2dict(self.seed_dir, **pp2d_kwargs)
- found.append(self.seed_dir)
- LOG.debug("Using seeded data from %s", self.seed_dir)
- except ValueError as e:
- pass
-
- if self.seed_dir in found:
- mydata = _merge_new_seed(mydata, seeded)
+ 'optional': ['vendor-data', 'network-config']}
+
+ for path in self.seed_dirs:
+ try:
+ seeded = util.pathprefix2dict(path, **pp2d_kwargs)
+ found.append(path)
+ LOG.debug("Using seeded data from %s", path)
+ mydata = _merge_new_seed(mydata, seeded)
+ break
+ except ValueError as e:
+ pass
# If the datasource config had a 'seedfrom' entry, then that takes
# precedence over a 'seedfrom' that was found in a filesystem
@@ -124,7 +127,7 @@ class DataSourceNoCloud(sources.DataSource):
# that is more likely to be what is desired. If they want
# dsmode of local, then they must specify that.
if 'dsmode' not in mydata['meta-data']:
- mydata['dsmode'] = "net"
+ mydata['meta-data']['dsmode'] = "net"
LOG.debug("Using data from %s", dev)
found.append(dev)
@@ -141,8 +144,7 @@ class DataSourceNoCloud(sources.DataSource):
if len(found) == 0:
return False
- seeded_interfaces = None
-
+ seeded_network = None
# The special argument "seedfrom" indicates we should
# attempt to seed the userdata / metadata from its value
# its primarily value is in allowing the user to type less
@@ -158,8 +160,9 @@ class DataSourceNoCloud(sources.DataSource):
LOG.debug("Seed from %s not supported by %s", seedfrom, self)
return False
- if 'network-interfaces' in mydata['meta-data']:
- seeded_interfaces = self.dsmode
+ if (mydata['meta-data'].get('network-interfaces') or
+ mydata.get('network-config')):
+ seeded_network = self.dsmode
# This could throw errors, but the user told us to do it
# so if errors are raised, let them raise
@@ -176,26 +179,75 @@ class DataSourceNoCloud(sources.DataSource):
mydata['meta-data'] = util.mergemanydict([mydata['meta-data'],
defaults])
- # Update the network-interfaces if metadata had 'network-interfaces'
- # entry and this is the local datasource, or 'seedfrom' was used
- # and the source of the seed was self.dsmode
- # ('local' for NoCloud, 'net' for NoCloudNet')
- if ('network-interfaces' in mydata['meta-data'] and
- (self.dsmode in ("local", seeded_interfaces))):
- LOG.debug("Updating network interfaces from %s", self)
- self.distro.apply_network(
- mydata['meta-data']['network-interfaces'])
+ netdata = {'format': None, 'data': None}
+ if mydata['meta-data'].get('network-interfaces'):
+ netdata['format'] = 'interfaces'
+ netdata['data'] = mydata['meta-data']['network-interfaces']
+ elif mydata.get('network-config'):
+ netdata['format'] = 'network-config'
+ netdata['data'] = mydata['network-config']
+
+ # if this is the local datasource or 'seedfrom' was used
+ # and the source of the seed was self.dsmode.
+ # Then see if there is network config to apply.
+ # note this is obsolete network-interfaces style seeding.
+ if self.dsmode in ("local", seeded_network):
+ if mydata['meta-data'].get('network-interfaces'):
+ LOG.debug("Updating network interfaces from %s", self)
+ self.distro.apply_network(
+ mydata['meta-data']['network-interfaces'])
if mydata['meta-data']['dsmode'] == self.dsmode:
self.seed = ",".join(found)
self.metadata = mydata['meta-data']
self.userdata_raw = mydata['user-data']
- self.vendordata = mydata['vendor-data']
+ self.vendordata_raw = mydata['vendor-data']
+ self._network_config = mydata['network-config']
return True
- LOG.debug("%s: not claiming datasource, dsmode=%s", self, md['dsmode'])
+ LOG.debug("%s: not claiming datasource, dsmode=%s", self,
+ mydata['meta-data']['dsmode'])
return False
+ def check_instance_id(self, sys_cfg):
+        # quickly check (local only) whether self.instance_id is still
+        # valid; we consult the kernel command line or seed files.
+ current = self.get_instance_id()
+ if not current:
+ return None
+
+ quick_id = _quick_read_instance_id(cmdline_id=self.cmdline_id,
+ dirs=self.seed_dirs)
+ if not quick_id:
+ return None
+ return quick_id == current
+
+ @property
+ def network_config(self):
+ return self._network_config
+
+
+def _quick_read_instance_id(cmdline_id, dirs=None):
+ if dirs is None:
+ dirs = []
+
+ iid_key = 'instance-id'
+    if cmdline_id is not None:
+        fill = {}
+        if parse_cmdline_data(cmdline_id, fill) and iid_key in fill:
+            return fill[iid_key]
+
+ for d in dirs:
+ try:
+ data = util.pathprefix2dict(d, required=['meta-data'])
+ md = util.load_yaml(data['meta-data'])
+ if iid_key in md:
+ return md[iid_key]
+ except ValueError:
+ pass
+
+ return None
+
# Returns true or false indicating if cmdline indicated
# that this module should be used
@@ -243,9 +295,17 @@ def parse_cmdline_data(ds_id, fill, cmdline=None):
def _merge_new_seed(cur, seeded):
ret = cur.copy()
- ret['meta-data'] = util.mergemanydict([cur['meta-data'],
- util.load_yaml(seeded['meta-data'])])
- ret['user-data'] = seeded['user-data']
+
+    newmd = seeded.get('meta-data', {})
+    if not isinstance(newmd, dict):
+        newmd = util.load_yaml(newmd)
+ ret['meta-data'] = util.mergemanydict([cur['meta-data'], newmd])
+
+ if seeded.get('network-config'):
+ ret['network-config'] = util.load_yaml(seeded['network-config'])
+
+ if 'user-data' in seeded:
+ ret['user-data'] = seeded['user-data']
if 'vendor-data' in seeded:
ret['vendor-data'] = seeded['vendor-data']
return ret
@@ -256,14 +316,13 @@ class DataSourceNoCloudNet(DataSourceNoCloud):
DataSourceNoCloud.__init__(self, sys_cfg, distro, paths)
self.cmdline_id = "ds=nocloud-net"
self.supported_seed_starts = ("http://", "https://", "ftp://")
- self.seed_dir = os.path.join(paths.seed_dir, 'nocloud-net')
self.dsmode = "net"
# Used to match classes to dependencies
datasources = [
- (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
- (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceNoCloud, (sources.DEP_FILESYSTEM, )),
+ (DataSourceNoCloudNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
]
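
With both seed directories folded into one lookup list, the search logic
reduces to a first-match loop over pathprefix2dict. A minimal sketch of that
behavior, assuming the standard seed locations under /var/lib/cloud/seed and
a cloud-init checkout on PYTHONPATH:

    from cloudinit import util

    seed_dirs = ['/var/lib/cloud/seed/nocloud',
                 '/var/lib/cloud/seed/nocloud-net']
    pp2d_kwargs = {'required': ['user-data', 'meta-data'],
                   'optional': ['vendor-data', 'network-config']}

    for path in seed_dirs:
        try:
            # maps each file name under 'path' to its contents
            seeded = util.pathprefix2dict(path, **pp2d_kwargs)
            print("using seeded data from %s" % path)
            break
        except ValueError:
            # a required file was missing; fall through to the next dir
            pass
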
diff --git a/cloudinit/sources/DataSourceNone.py b/cloudinit/sources/DataSourceNone.py
index 12a8a992..d1a62b2a 100644
--- a/cloudinit/sources/DataSourceNone.py
+++ b/cloudinit/sources/DataSourceNone.py
@@ -47,8 +47,8 @@ class DataSourceNone(sources.DataSource):
# Used to match classes to dependencies
datasources = [
- (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
- (DataSourceNone, []),
+ (DataSourceNone, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceNone, []),
]
diff --git a/cloudinit/sources/DataSourceOVF.py b/cloudinit/sources/DataSourceOVF.py
index 7ba60735..2a6cd050 100644
--- a/cloudinit/sources/DataSourceOVF.py
+++ b/cloudinit/sources/DataSourceOVF.py
@@ -25,10 +25,22 @@ from xml.dom import minidom
import base64
import os
import re
+import time
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
+from .helpers.vmware.imc.config import Config
+from .helpers.vmware.imc.config_file import ConfigFile
+from .helpers.vmware.imc.config_nic import NicConfigurator
+from .helpers.vmware.imc.guestcust_event import GuestCustEventEnum
+from .helpers.vmware.imc.guestcust_state import GuestCustStateEnum
+from .helpers.vmware.imc.guestcust_error import GuestCustErrorEnum
+from .helpers.vmware.imc.guestcust_util import (
+ set_customization_status,
+ get_nics_to_enable,
+ enable_nics
+)
LOG = logging.getLogger(__name__)
@@ -50,23 +62,95 @@ class DataSourceOVF(sources.DataSource):
found = []
md = {}
ud = ""
+ vmwarePlatformFound = False
+ vmwareImcConfigFilePath = ''
defaults = {
"instance-id": "iid-dsovf",
}
(seedfile, contents) = get_ovf_env(self.paths.seed_dir)
+
+ system_type = util.read_dmi_data("system-product-name")
+ if system_type is None:
+ LOG.debug("No system-product-name found")
+
if seedfile:
# Found a seed dir
seed = os.path.join(self.paths.seed_dir, seedfile)
(md, ud, cfg) = read_ovf_environment(contents)
self.environment = contents
found.append(seed)
+ elif system_type and 'vmware' in system_type.lower():
+ LOG.debug("VMware Virtualization Platform found")
+ if not util.get_cfg_option_bool(
+ self.sys_cfg, "disable_vmware_customization", True):
+ deployPkgPluginPath = search_file("/usr/lib/vmware-tools",
+ "libdeployPkgPlugin.so")
+ if not deployPkgPluginPath:
+ deployPkgPluginPath = search_file("/usr/lib/open-vm-tools",
+ "libdeployPkgPlugin.so")
+ if deployPkgPluginPath:
+ # When the VM is powered on, the "VMware Tools" daemon
+ # copies the customization specification file to
+ # /var/run/vmware-imc directory. cloud-init code needs
+ # to search for the file in that directory.
+ vmwareImcConfigFilePath = util.log_time(
+ logfunc=LOG.debug,
+ msg="waiting for configuration file",
+ func=wait_for_imc_cfg_file,
+ args=("/var/run/vmware-imc", "cust.cfg"))
+
+ if vmwareImcConfigFilePath:
+ LOG.debug("Found VMware DeployPkg Config File at %s" %
+ vmwareImcConfigFilePath)
+ else:
+ LOG.debug("Did not find VMware DeployPkg Config File Path")
+ else:
+ LOG.debug("Customization for VMware platform is disabled.")
+
+ if vmwareImcConfigFilePath:
+ nics = ""
+ try:
+ cf = ConfigFile(vmwareImcConfigFilePath)
+ conf = Config(cf)
+ (md, ud, cfg) = read_vmware_imc(conf)
+ dirpath = os.path.dirname(vmwareImcConfigFilePath)
+ nics = get_nics_to_enable(dirpath)
+ except Exception as e:
+ LOG.debug("Error parsing the customization Config File")
+ LOG.exception(e)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustEventEnum.GUESTCUST_EVENT_CUSTOMIZE_FAILED)
+ enable_nics(nics)
+ return False
+ finally:
+ util.del_dir(os.path.dirname(vmwareImcConfigFilePath))
+
+ try:
+ LOG.debug("Applying the Network customization")
+ nicConfigurator = NicConfigurator(conf.nics)
+ nicConfigurator.configure()
+ except Exception as e:
+ LOG.debug("Error applying the Network Configuration")
+ LOG.exception(e)
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustEventEnum.GUESTCUST_EVENT_NETWORK_SETUP_FAILED)
+ enable_nics(nics)
+ return False
+
+ vmwarePlatformFound = True
+ set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_DONE,
+ GuestCustErrorEnum.GUESTCUST_ERROR_SUCCESS)
+ enable_nics(nics)
else:
np = {'iso': transport_iso9660,
'vmware-guestd': transport_vmware_guestd, }
name = None
- for (name, transfunc) in np.iteritems():
+ for (name, transfunc) in np.items():
(contents, _dev, _fname) = transfunc()
if contents:
break
@@ -76,7 +160,7 @@ class DataSourceOVF(sources.DataSource):
found.append(name)
# There was no OVF transports found
- if len(found) == 0:
+ if len(found) == 0 and not vmwarePlatformFound:
return False
if 'seedfrom' in md and md['seedfrom']:
@@ -129,6 +213,36 @@ class DataSourceOVFNet(DataSourceOVF):
self.supported_seed_starts = ("http://", "https://", "ftp://")
+def wait_for_imc_cfg_file(dirpath, filename, maxwait=180, naplen=5):
+ waited = 0
+
+ while waited < maxwait:
+ fileFullPath = search_file(dirpath, filename)
+ if fileFullPath:
+ return fileFullPath
+ time.sleep(naplen)
+ waited += naplen
+ return None
+
+
+# This will return a dict with some content
+# meta-data, user-data, some config
+def read_vmware_imc(config):
+ md = {}
+ cfg = {}
+ ud = ""
+ if config.host_name:
+ if config.domain_name:
+ md['local-hostname'] = config.host_name + "." + config.domain_name
+ else:
+ md['local-hostname'] = config.host_name
+
+ if config.timezone:
+ cfg['timezone'] = config.timezone
+
+ return (md, ud, cfg)
+
+
# This will return a dict with some content
# meta-data, user-data, some config
def read_ovf_environment(contents):
@@ -138,7 +252,7 @@ def read_ovf_environment(contents):
ud = ""
cfg_props = ['password']
md_props = ['seedfrom', 'local-hostname', 'public-keys', 'instance-id']
- for (prop, val) in props.iteritems():
+ for (prop, val) in props.items():
if prop == 'hostname':
prop = "local-hostname"
if prop in md_props:
@@ -183,7 +297,7 @@ def transport_iso9660(require_iso=True):
# Go through mounts to see if it was already mounted
mounts = util.mounts()
- for (dev, info) in mounts.iteritems():
+ for (dev, info) in mounts.items():
fstype = info['fstype']
if fstype != "iso9660" and require_iso:
continue
@@ -264,14 +378,14 @@ def get_properties(contents):
# could also check here that elem.namespaceURI ==
# "http://schemas.dmtf.org/ovf/environment/1"
propSections = find_child(dom.documentElement,
- lambda n: n.localName == "PropertySection")
+ lambda n: n.localName == "PropertySection")
if len(propSections) == 0:
raise XmlError("No 'PropertySection's")
props = {}
propElems = find_child(propSections[0],
- (lambda n: n.localName == "Property"))
+ (lambda n: n.localName == "Property"))
for elem in propElems:
key = elem.attributes.getNamedItemNS(envNsURI, "key").value
@@ -281,14 +395,25 @@ def get_properties(contents):
return props
+def search_file(dirpath, filename):
+ if not dirpath or not filename:
+ return None
+
+ for root, dirs, files in os.walk(dirpath):
+ if filename in files:
+ return os.path.join(root, filename)
+
+ return None
+
+
class XmlError(Exception):
pass
# Used to match classes to dependencies
datasources = (
- (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
- (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
+ (DataSourceOVF, (sources.DEP_FILESYSTEM, )),
+ (DataSourceOVFNet, (sources.DEP_FILESYSTEM, sources.DEP_NETWORK)),
)
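
Taken together, the IMC pieces wire up as: wait for cust.cfg, parse it, apply
the NIC configuration, then signal status back to the hypervisor. A minimal
sketch of the happy path, assuming the tools daemon has already dropped the
file at the directory used above:

    from cloudinit.sources.DataSourceOVF import (
        read_vmware_imc, wait_for_imc_cfg_file)
    from cloudinit.sources.helpers.vmware.imc.config import Config
    from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile

    path = wait_for_imc_cfg_file("/var/run/vmware-imc", "cust.cfg")
    if path:
        conf = Config(ConfigFile(path))
        md, ud, cfg = read_vmware_imc(conf)
        # md carries 'local-hostname' (host_name[.domain_name]); cfg
        # carries 'timezone' when present in the specification
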
diff --git a/cloudinit/sources/DataSourceOpenNebula.py b/cloudinit/sources/DataSourceOpenNebula.py
index e2469f6e..681f3a96 100644
--- a/cloudinit/sources/DataSourceOpenNebula.py
+++ b/cloudinit/sources/DataSourceOpenNebula.py
@@ -24,7 +24,6 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
-import base64
import os
import pwd
import re
@@ -34,6 +33,7 @@ from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
+
LOG = logging.getLogger(__name__)
DEFAULT_IID = "iid-dsopennebula"
@@ -149,8 +149,8 @@ class BrokenContextDiskDir(Exception):
class OpenNebulaNetwork(object):
REG_DEV_MAC = re.compile(
- r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
- re.MULTILINE | re.DOTALL)
+ r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
+ re.MULTILINE | re.DOTALL)
def __init__(self, ip, context):
self.ip = ip
@@ -280,7 +280,7 @@ def parse_shell_config(content, keylist=None, bash=None, asuser=None,
# allvars expands to all existing variables by using '${!x*}' notation
# where x is lower or upper case letters or '_'
- allvars = ["${!%s*}" % x for x in string.letters + "_"]
+ allvars = ["${!%s*}" % x for x in string.ascii_letters + "_"]
keylist_in = keylist
if keylist is None:
@@ -379,9 +379,8 @@ def read_context_disk_dir(source_dir, asuser=None):
raise BrokenContextDiskDir("configured user '%s' "
"does not exist", asuser)
try:
- with open(os.path.join(source_dir, 'context.sh'), 'r') as f:
- content = f.read().strip()
-
+ path = os.path.join(source_dir, 'context.sh')
+ content = util.load_file(path)
context = parse_shell_config(content, asuser=asuser)
except util.ProcessExecutionError as e:
raise BrokenContextDiskDir("Error processing context.sh: %s" % (e))
@@ -405,7 +404,8 @@ def read_context_disk_dir(source_dir, asuser=None):
if ssh_key_var:
lines = context.get(ssh_key_var).splitlines()
results['metadata']['public-keys'] = [l for l in lines
- if len(l) and not l.startswith("#")]
+ if len(l) and not
+ l.startswith("#")]
# custom hostname -- try hostname or leave cloud-init
# itself create hostname from IP address later
@@ -426,14 +426,14 @@ def read_context_disk_dir(source_dir, asuser=None):
context.get('USER_DATA_ENCODING'))
if encoding == "base64":
try:
- results['userdata'] = base64.b64decode(results['userdata'])
+ results['userdata'] = util.b64d(results['userdata'])
except TypeError:
LOG.warn("Failed base64 decoding of userdata")
# generate static /etc/network/interfaces
# only if there are any required context variables
# http://opennebula.org/documentation:rel3.8:cong#network_configuration
- for k in context.keys():
+ for k in context:
if re.match(r'^ETH\d+_IP$', k):
(out, _) = util.subp(['/sbin/ip', 'link'])
net = OpenNebulaNetwork(out, context)
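
The MAC-to-device regex at the top of OpenNebulaNetwork is easy to
sanity-check in isolation. A minimal sketch, with an illustrative fragment of
'ip link' output:

    import re

    REG_DEV_MAC = re.compile(
        r'^\d+: (eth\d+):.*?link\/ether (..:..:..:..:..:..) ?',
        re.MULTILINE | re.DOTALL)
    sample = ("2: eth0: <BROADCAST,MULTICAST,UP> mtu 1500\n"
              "    link/ether 02:00:0a:00:00:05 brd ff:ff:ff:ff:ff:ff")
    print(REG_DEV_MAC.findall(sample))
    # -> [('eth0', '02:00:0a:00:00:05')]
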
diff --git a/cloudinit/sources/DataSourceOpenStack.py b/cloudinit/sources/DataSourceOpenStack.py
index 469c2e2a..f7f4590b 100644
--- a/cloudinit/sources/DataSourceOpenStack.py
+++ b/cloudinit/sources/DataSourceOpenStack.py
@@ -150,6 +150,10 @@ class DataSourceOpenStack(openstack.SourceMixin, sources.DataSource):
return True
+ def check_instance_id(self, sys_cfg):
+        # quickly check (local only) whether self.instance_id is still valid
+ return sources.instance_id_matches_system_uuid(self.get_instance_id())
+
def read_metadata_service(base_url, ssl_details=None):
reader = openstack.MetadataReader(base_url, ssl_details=ssl_details)
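
check_instance_id defers to the shared DMI comparison added to
sources/__init__.py further down in this diff. A minimal standalone sketch of
that check, assuming util.read_dmi_data is available:

    from cloudinit import util

    def instance_id_still_valid(instance_id):
        if not instance_id:
            return False
        dmi_value = util.read_dmi_data('system-uuid')
        # firmware vendors disagree on UUID case, so compare case-folded
        return bool(dmi_value) and instance_id.lower() == dmi_value.lower()
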
diff --git a/cloudinit/sources/DataSourceSmartOS.py b/cloudinit/sources/DataSourceSmartOS.py
index 2733a2f6..5edab152 100644
--- a/cloudinit/sources/DataSourceSmartOS.py
+++ b/cloudinit/sources/DataSourceSmartOS.py
@@ -20,28 +20,38 @@
# Datasource for provisioning on SmartOS. This works on Joyent
# and public/private Clouds using SmartOS.
#
-# SmartOS hosts use a serial console (/dev/ttyS1) on Linux Guests.
+# SmartOS hosts use a serial console (/dev/ttyS1) on KVM Linux Guests
# The meta-data is transmitted via key/value pairs made by
# requests on the console. For example, to get the hostname, you
# would send "GET hostname" on /dev/ttyS1.
+# For Linux Guests running in LX-Brand Zones on SmartOS hosts
+# a socket (/native/.zonecontrol/metadata.sock) is used instead
+# of a serial console.
#
# Certain behavior is defined by the DataDictionary
# http://us-east.manta.joyent.com/jmc/public/mdata/datadict.html
# Comments with "@datadictionary" are snippets of the definition
-import base64
+import binascii
+import contextlib
+import os
+import random
+import re
+import socket
+import stat
+
+import serial
+
from cloudinit import log as logging
from cloudinit import sources
from cloudinit import util
-import os
-import os.path
-import serial
LOG = logging.getLogger(__name__)
SMARTOS_ATTRIB_MAP = {
# Cloud-init Key : (SmartOS Key, Strip line endings)
+ 'instance-id': ('sdc:uuid', True),
'local-hostname': ('hostname', True),
'public-keys': ('root_authorized_keys', True),
'user-script': ('user-script', False),
@@ -72,6 +82,7 @@ DS_CFG_PATH = ['datasource', DS_NAME]
#
BUILTIN_DS_CONFIG = {
'serial_device': '/dev/ttyS1',
+ 'metadata_sockfile': '/native/.zonecontrol/metadata.sock',
'seed_timeout': 60,
'no_base64_decode': ['root_authorized_keys',
'motd_sys_info',
@@ -79,7 +90,7 @@ BUILTIN_DS_CONFIG = {
'user-data',
'user-script',
'sdc:datacenter_name',
- ],
+ 'sdc:uuid'],
'base64_keys': [],
'base64_all': False,
'disk_aliases': {'ephemeral0': '/dev/vdb'},
@@ -90,7 +101,7 @@ BUILTIN_CLOUD_CONFIG = {
'ephemeral0': {'table_type': 'mbr',
'layout': False,
'overwrite': False}
- },
+ },
'fs_setup': [{'label': 'ephemeral0',
'filesystem': 'ext3',
'device': 'ephemeral0'}],
@@ -146,17 +157,27 @@ class DataSourceSmartOS(sources.DataSource):
def __init__(self, sys_cfg, distro, paths):
sources.DataSource.__init__(self, sys_cfg, distro, paths)
self.is_smartdc = None
-
self.ds_cfg = util.mergemanydict([
self.ds_cfg,
util.get_cfg_by_path(sys_cfg, DS_CFG_PATH, {}),
BUILTIN_DS_CONFIG])
self.metadata = {}
- self.cfg = BUILTIN_CLOUD_CONFIG
- self.seed = self.ds_cfg.get("serial_device")
- self.seed_timeout = self.ds_cfg.get("serial_timeout")
+ # SDC LX-Brand Zones lack dmidecode (no /dev/mem) but
+ # report 'BrandZ virtual linux' as the kernel version
+ if os.uname()[3].lower() == 'brandz virtual linux':
+ LOG.debug("Host is SmartOS, guest in Zone")
+ self.is_smartdc = True
+ self.smartos_type = 'lx-brand'
+ self.cfg = {}
+ self.seed = self.ds_cfg.get("metadata_sockfile")
+ else:
+ self.is_smartdc = True
+ self.smartos_type = 'kvm'
+ self.seed = self.ds_cfg.get("serial_device")
+ self.cfg = BUILTIN_CLOUD_CONFIG
+ self.seed_timeout = self.ds_cfg.get("serial_timeout")
self.smartos_no_base64 = self.ds_cfg.get('no_base64_decode')
self.b64_keys = self.ds_cfg.get('base64_keys')
self.b64_all = self.ds_cfg.get('base64_all')
@@ -166,12 +187,49 @@ class DataSourceSmartOS(sources.DataSource):
root = sources.DataSource.__str__(self)
return "%s [seed=%s]" % (root, self.seed)
+ def _get_seed_file_object(self):
+ if not self.seed:
+ raise AttributeError("seed device is not set")
+
+ if self.smartos_type == 'lx-brand':
+ if not stat.S_ISSOCK(os.stat(self.seed).st_mode):
+ LOG.debug("Seed %s is not a socket", self.seed)
+ return None
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.connect(self.seed)
+ return sock.makefile('rwb')
+ else:
+ if not stat.S_ISCHR(os.stat(self.seed).st_mode):
+ LOG.debug("Seed %s is not a character device")
+ return None
+ ser = serial.Serial(self.seed, timeout=self.seed_timeout)
+ if not ser.isOpen():
+ raise SystemError("Unable to open %s" % self.seed)
+ return ser
+
+ def _set_provisioned(self):
+ '''Mark the instance provisioning state as successful.
+
+ When run in a zone, the host OS will look for /var/svc/provisioning
+ to be renamed as /var/svc/provision_success. This should be done
+ after meta-data is successfully retrieved and from this point
+ the host considers the provision of the zone to be a success and
+ keeps the zone running.
+ '''
+
+ LOG.debug('Instance provisioning state set as successful')
+ svc_path = '/var/svc'
+ if os.path.exists('/'.join([svc_path, 'provisioning'])):
+ os.rename('/'.join([svc_path, 'provisioning']),
+ '/'.join([svc_path, 'provision_success']))
+
def get_data(self):
md = {}
ud = ""
if not device_exists(self.seed):
- LOG.debug("No serial device '%s' found for SmartOS datasource",
+ LOG.debug("No metadata device '%s' found for SmartOS datasource",
self.seed)
return False
@@ -181,29 +239,36 @@ class DataSourceSmartOS(sources.DataSource):
LOG.debug("Disabling SmartOS datasource on arm (LP: #1243287)")
return False
- dmi_info = dmi_data()
- if dmi_info is False:
- LOG.debug("No dmidata utility found")
- return False
-
- system_uuid, system_type = tuple(dmi_info)
- if 'smartdc' not in system_type.lower():
- LOG.debug("Host is not on SmartOS. system_type=%s", system_type)
+ # SDC KVM instances will provide dmi data, LX-brand does not
+ if self.smartos_type == 'kvm':
+ dmi_info = dmi_data()
+ if dmi_info is False:
+ LOG.debug("No dmidata utility found")
+ return False
+
+ system_type = dmi_info
+ if 'smartdc' not in system_type.lower():
+ LOG.debug("Host is not on SmartOS. system_type=%s",
+ system_type)
+ return False
+ LOG.debug("Host is SmartOS, guest in KVM")
+
+ seed_obj = self._get_seed_file_object()
+ if seed_obj is None:
+ LOG.debug('Seed file object not found.')
return False
- self.is_smartdc = True
- md['instance-id'] = system_uuid
-
- b64_keys = self.query('base64_keys', strip=True, b64=False)
- if b64_keys is not None:
- self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
+ with contextlib.closing(seed_obj) as seed:
+ b64_keys = self.query('base64_keys', seed, strip=True, b64=False)
+ if b64_keys is not None:
+ self.b64_keys = [k.strip() for k in str(b64_keys).split(',')]
- b64_all = self.query('base64_all', strip=True, b64=False)
- if b64_all is not None:
- self.b64_all = util.is_true(b64_all)
+ b64_all = self.query('base64_all', seed, strip=True, b64=False)
+ if b64_all is not None:
+ self.b64_all = util.is_true(b64_all)
- for ci_noun, attribute in SMARTOS_ATTRIB_MAP.iteritems():
- smartos_noun, strip = attribute
- md[ci_noun] = self.query(smartos_noun, strip=strip)
+ for ci_noun, attribute in SMARTOS_ATTRIB_MAP.items():
+ smartos_noun, strip = attribute
+ md[ci_noun] = self.query(smartos_noun, seed, strip=strip)
# @datadictionary: This key may contain a program that is written
# to a file in the filesystem of the guest on each boot and then
@@ -218,11 +283,12 @@ class DataSourceSmartOS(sources.DataSource):
user_script = os.path.join(data_d, 'user-script')
u_script_l = "%s/user-script" % LEGACY_USER_D
write_boot_content(md.get('user-script'), content_f=user_script,
- link=u_script_l, shebang=True, mode=0700)
+ link=u_script_l, shebang=True, mode=0o700)
operator_script = os.path.join(data_d, 'operator-script')
write_boot_content(md.get('operator-script'),
- content_f=operator_script, shebang=False, mode=0700)
+ content_f=operator_script, shebang=False,
+ mode=0o700)
# @datadictionary: This key has no defined format, but its value
# is written to the file /var/db/mdata-user-data on each boot prior
@@ -235,7 +301,7 @@ class DataSourceSmartOS(sources.DataSource):
# Handle the cloud-init regular meta
if not md['local-hostname']:
- md['local-hostname'] = system_uuid
+ md['local-hostname'] = md['instance-id']
ud = None
if md['user-data']:
@@ -252,6 +318,8 @@ class DataSourceSmartOS(sources.DataSource):
self.metadata = util.mergemanydict([md, self.metadata])
self.userdata_raw = ud
self.vendordata_raw = md['vendor-data']
+
+ self._set_provisioned()
return True
def device_name_to_device(self, name):
@@ -263,125 +331,146 @@ class DataSourceSmartOS(sources.DataSource):
def get_instance_id(self):
return self.metadata['instance-id']
- def query(self, noun, strip=False, default=None, b64=None):
+ def query(self, noun, seed_file, strip=False, default=None, b64=None):
if b64 is None:
if noun in self.smartos_no_base64:
b64 = False
elif self.b64_all or noun in self.b64_keys:
b64 = True
- return query_data(noun=noun, strip=strip, seed_device=self.seed,
- seed_timeout=self.seed_timeout, default=default,
- b64=b64)
+ return self._query_data(noun, seed_file, strip=strip,
+ default=default, b64=b64)
+ def _query_data(self, noun, seed_file, strip=False,
+ default=None, b64=None):
+ """Makes a request via "GET <NOUN>"
-def device_exists(device):
- """Symplistic method to determine if the device exists or not"""
- return os.path.exists(device)
+    In the response, the first line is the status, while subsequent
+    lines are the value. A line containing only "." indicates the
+    end of the response.
+    If the response is expected to be base64 encoded, then set
+    b64 to true. Unfortunately, there is no way to know if
+ something is 100% encoded, so this method relies on being told
+ if the data is base64 or not.
+ """
-def get_serial(seed_device, seed_timeout):
- """This is replaced in unit testing, allowing us to replace
- serial.Serial with a mocked class.
-
- The timeout value of 60 seconds should never be hit. The value
- is taken from SmartOS own provisioning tools. Since we are reading
- each line individually up until the single ".", the transfer is
- usually very fast (i.e. microseconds) to get the response.
- """
- if not seed_device:
- raise AttributeError("seed_device value is not set")
-
- ser = serial.Serial(seed_device, timeout=seed_timeout)
- if not ser.isOpen():
- raise SystemError("Unable to open %s" % seed_device)
-
- return ser
+ if not noun:
+ return False
+ response = JoyentMetadataClient(seed_file).get_metadata(noun)
-def query_data(noun, seed_device, seed_timeout, strip=False, default=None,
- b64=None):
- """Makes a request to via the serial console via "GET <NOUN>"
+ if response is None:
+ return default
- In the response, the first line is the status, while subsequent lines
- are is the value. A blank line with a "." is used to indicate end of
- response.
+ if b64 is None:
+ b64 = self._query_data('b64-%s' % noun, seed_file, b64=False,
+ default=False, strip=True)
+ b64 = util.is_true(b64)
- If the response is expected to be base64 encoded, then set b64encoded
- to true. Unfortantely, there is no way to know if something is 100%
- encoded, so this method relies on being told if the data is base64 or
- not.
- """
+ resp = None
+ if b64 or strip:
+ resp = "".join(response).rstrip()
+ else:
+ resp = "".join(response)
- if not noun:
- return False
+ if b64:
+ try:
+ return util.b64d(resp)
+ # Bogus input produces different errors in Python 2 and 3;
+ # catch both.
+ except (TypeError, binascii.Error):
+ LOG.warn("Failed base64 decoding key '%s'", noun)
+ return resp
- ser = get_serial(seed_device, seed_timeout)
- ser.write("GET %s\n" % noun.rstrip())
- status = str(ser.readline()).rstrip()
- response = []
- eom_found = False
+ return resp
- if 'SUCCESS' not in status:
- ser.close()
- return default
- while not eom_found:
- m = ser.readline()
- if m.rstrip() == ".":
- eom_found = True
- else:
- response.append(m)
+def device_exists(device):
+ """Symplistic method to determine if the device exists or not"""
+ return os.path.exists(device)
- ser.close()
- if b64 is None:
- b64 = query_data('b64-%s' % noun, seed_device=seed_device,
- seed_timeout=seed_timeout, b64=False,
- default=False, strip=True)
- b64 = util.is_true(b64)
+class JoyentMetadataFetchException(Exception):
+ pass
- resp = None
- if b64 or strip:
- resp = "".join(response).rstrip()
- else:
- resp = "".join(response)
- if b64:
- try:
- return base64.b64decode(resp)
- except TypeError:
- LOG.warn("Failed base64 decoding key '%s'", noun)
- return resp
+class JoyentMetadataClient(object):
+ """
+ A client implementing v2 of the Joyent Metadata Protocol Specification.
- return resp
+ The full specification can be found at
+ http://eng.joyent.com/mdata/protocol.html
+ """
+ line_regex = re.compile(
+ r'V2 (?P<length>\d+) (?P<checksum>[0-9a-f]+)'
+ r' (?P<body>(?P<request_id>[0-9a-f]+) (?P<status>SUCCESS|NOTFOUND)'
+ r'( (?P<payload>.+))?)')
+
+ def __init__(self, metasource):
+ self.metasource = metasource
+
+ def _checksum(self, body):
+ return '{0:08x}'.format(
+ binascii.crc32(body.encode('utf-8')) & 0xffffffff)
+
+ def _get_value_from_frame(self, expected_request_id, frame):
+ frame_data = self.line_regex.match(frame).groupdict()
+ if int(frame_data['length']) != len(frame_data['body']):
+ raise JoyentMetadataFetchException(
+ 'Incorrect frame length given ({0} != {1}).'.format(
+ frame_data['length'], len(frame_data['body'])))
+ expected_checksum = self._checksum(frame_data['body'])
+ if frame_data['checksum'] != expected_checksum:
+ raise JoyentMetadataFetchException(
+ 'Invalid checksum (expected: {0}; got {1}).'.format(
+ expected_checksum, frame_data['checksum']))
+ if frame_data['request_id'] != expected_request_id:
+ raise JoyentMetadataFetchException(
+ 'Request ID mismatch (expected: {0}; got {1}).'.format(
+ expected_request_id, frame_data['request_id']))
+ if not frame_data.get('payload', None):
+ LOG.debug('No value found.')
+ return None
+ value = util.b64d(frame_data['payload'])
+ LOG.debug('Value "%s" found.', value)
+ return value
+
+ def get_metadata(self, metadata_key):
+ LOG.debug('Fetching metadata key "%s"...', metadata_key)
+ request_id = '{0:08x}'.format(random.randint(0, 0xffffffff))
+ message_body = '{0} GET {1}'.format(request_id,
+ util.b64e(metadata_key))
+ msg = 'V2 {0} {1} {2}\n'.format(
+ len(message_body), self._checksum(message_body), message_body)
+ LOG.debug('Writing "%s" to metadata transport.', msg)
+ self.metasource.write(msg.encode('ascii'))
+ self.metasource.flush()
+
+ response = bytearray()
+ response.extend(self.metasource.read(1))
+ while response[-1:] != b'\n':
+ response.extend(self.metasource.read(1))
+ response = response.rstrip().decode('ascii')
+ LOG.debug('Read "%s" from metadata transport.', response)
+
+ if 'SUCCESS' not in response:
+ return None
+
+ return self._get_value_from_frame(request_id, response)
def dmi_data():
- sys_uuid, sys_type = None, None
- dmidecode_path = util.which('dmidecode')
- if not dmidecode_path:
- return False
-
- sys_uuid_cmd = [dmidecode_path, "-s", "system-uuid"]
- try:
- LOG.debug("Getting hostname from dmidecode")
- (sys_uuid, _err) = util.subp(sys_uuid_cmd)
- except Exception as e:
- util.logexc(LOG, "Failed to get system UUID", e)
+ sys_type = util.read_dmi_data("system-product-name")
- sys_type_cmd = [dmidecode_path, "-s", "system-product-name"]
- try:
- LOG.debug("Determining hypervisor product name via dmidecode")
- (sys_type, _err) = util.subp(sys_type_cmd)
- except Exception as e:
- util.logexc(LOG, "Failed to get system UUID", e)
+ if not sys_type:
+ return None
- return (sys_uuid.lower().strip(), sys_type.strip())
+ return sys_type
def write_boot_content(content, content_f, link=None, shebang=False,
- mode=0400):
+ mode=0o400):
"""
Write the content to content_f. Under the following rules:
1. If no content, remove the file
@@ -423,7 +512,7 @@ def write_boot_content(content, content_f, link=None, shebang=False,
except Exception as e:
util.logexc(LOG, ("Failed to identify script type for %s" %
- content_f, e))
+ content_f, e))
if link:
try:
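
The V2 framing used by JoyentMetadataClient is simple enough to reproduce by
hand, which helps when debugging against the serial console or the zone
socket. A minimal sketch of building one request frame (the request id and
key are illustrative):

    import base64
    import binascii

    def frame_get(request_id, key):
        body = '{0} GET {1}'.format(
            request_id, base64.b64encode(key.encode()).decode())
        checksum = '{0:08x}'.format(
            binascii.crc32(body.encode('utf-8')) & 0xffffffff)
        return 'V2 {0} {1} {2}\n'.format(len(body), checksum, body)

    print(frame_get('0000abcd', 'hostname'))
    # -> 'V2 25 <crc32> 0000abcd GET aG9zdG5hbWU='
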
diff --git a/cloudinit/sources/__init__.py b/cloudinit/sources/__init__.py
index 7c7ef9ab..82cd3553 100644
--- a/cloudinit/sources/__init__.py
+++ b/cloudinit/sources/__init__.py
@@ -23,6 +23,8 @@
import abc
import os
+import six
+
from cloudinit import importer
from cloudinit import log as logging
from cloudinit import type_utils
@@ -30,6 +32,7 @@ from cloudinit import user_data as ud
from cloudinit import util
from cloudinit.filters import launch_index
+from cloudinit.reporting import events
DEP_FILESYSTEM = "FILESYSTEM"
DEP_NETWORK = "NETWORK"
@@ -130,7 +133,7 @@ class DataSource(object):
# we want to return the correct value for what will actually
# exist in this instance
mappings = {"sd": ("vd", "xvd", "vtb")}
- for (nfrom, tlist) in mappings.iteritems():
+ for (nfrom, tlist) in mappings.items():
if not short_name.startswith(nfrom):
continue
for nto in tlist:
@@ -155,6 +158,10 @@ class DataSource(object):
return self.metadata.get('availability-zone',
self.metadata.get('availability_zone'))
+ @property
+ def region(self):
+ return self.metadata.get('region')
+
def get_instance_id(self):
if not self.metadata or 'instance-id' not in self.metadata:
# Return a magic not really instance id string
@@ -208,8 +215,15 @@ class DataSource(object):
return hostname
def get_package_mirror_info(self):
- return self.distro.get_package_mirror_info(
- availability_zone=self.availability_zone)
+ return self.distro.get_package_mirror_info(data_source=self)
+
+ def check_instance_id(self, sys_cfg):
+        # quickly check (local only) whether self.instance_id is still valid
+ return False
+
+ @property
+ def network_config(self):
+ return None
def normalize_pubkey_data(pubkey_data):
@@ -218,18 +232,18 @@ def normalize_pubkey_data(pubkey_data):
if not pubkey_data:
return keys
- if isinstance(pubkey_data, (basestring, str)):
+ if isinstance(pubkey_data, six.string_types):
return str(pubkey_data).splitlines()
if isinstance(pubkey_data, (list, set)):
return list(pubkey_data)
if isinstance(pubkey_data, (dict)):
- for (_keyname, klist) in pubkey_data.iteritems():
+ for (_keyname, klist) in pubkey_data.items():
# lp:506332 uec metadata service responds with
# data that makes boto populate a string for 'klist' rather
# than a list.
- if isinstance(klist, (str, basestring)):
+ if isinstance(klist, six.string_types):
klist = [klist]
if isinstance(klist, (list, set)):
for pkey in klist:
@@ -241,17 +255,25 @@ def normalize_pubkey_data(pubkey_data):
return keys
-def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list):
+def find_source(sys_cfg, distro, paths, ds_deps, cfg_list, pkg_list, reporter):
ds_list = list_sources(cfg_list, ds_deps, pkg_list)
ds_names = [type_utils.obj_name(f) for f in ds_list]
- LOG.debug("Searching for data source in: %s", ds_names)
-
- for cls in ds_list:
+ mode = "network" if DEP_NETWORK in ds_deps else "local"
+ LOG.debug("Searching for %s data source in: %s", mode, ds_names)
+
+ for name, cls in zip(ds_names, ds_list):
+ myrep = events.ReportEventStack(
+ name="search-%s" % name.replace("DataSource", ""),
+ description="searching for %s data from %s" % (mode, name),
+ message="no %s data found from %s" % (mode, name),
+ parent=reporter)
try:
- LOG.debug("Seeing if we can get any data from %s", cls)
- s = cls(sys_cfg, distro, paths)
- if s.get_data():
- return (s, type_utils.obj_name(cls))
+ with myrep:
+ LOG.debug("Seeing if we can get any data from %s", cls)
+ s = cls(sys_cfg, distro, paths)
+ if s.get_data():
+ myrep.message = "found %s data from %s" % (mode, name)
+ return (s, type_utils.obj_name(cls))
except Exception:
util.logexc(LOG, "Getting data from %s failed", cls)
@@ -285,6 +307,18 @@ def list_sources(cfg_list, depends, pkg_list):
return src_list
+def instance_id_matches_system_uuid(instance_id, field='system-uuid'):
+    # quickly check (local only) whether instance_id is still valid by
+    # comparing it against the DMI value for 'field'.
+ if not instance_id:
+ return False
+
+ dmi_value = util.read_dmi_data(field)
+ if not dmi_value:
+ return False
+ return instance_id.lower() == dmi_value.lower()
+
+
# 'depends' is a list of dependencies (DEP_FILESYSTEM)
# ds_list is a list of 2 item lists
# ds_list = [
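
find_source now wraps each datasource probe in a ReportEventStack so the boot
reporter records a start/finish event pair per candidate. A minimal sketch of
that pattern; the parent reporter and the datasource name are illustrative:

    from cloudinit.reporting import events

    myrep = events.ReportEventStack(
        name="search-NoCloud",
        description="searching for local data from DataSourceNoCloud",
        message="no local data found from DataSourceNoCloud",
        parent=None)
    with myrep:
        found = True  # stand-in for cls(...).get_data()
        if found:
            myrep.message = "found local data from DataSourceNoCloud"
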
diff --git a/cloudinit/sources/helpers/azure.py b/cloudinit/sources/helpers/azure.py
new file mode 100644
index 00000000..018cac6d
--- /dev/null
+++ b/cloudinit/sources/helpers/azure.py
@@ -0,0 +1,278 @@
+import logging
+import os
+import re
+import socket
+import struct
+import tempfile
+import time
+from contextlib import contextmanager
+from xml.etree import ElementTree
+
+from cloudinit import util
+
+
+LOG = logging.getLogger(__name__)
+
+
+@contextmanager
+def cd(newdir):
+ prevdir = os.getcwd()
+ os.chdir(os.path.expanduser(newdir))
+ try:
+ yield
+ finally:
+ os.chdir(prevdir)
+
+
+class AzureEndpointHttpClient(object):
+
+ headers = {
+ 'x-ms-agent-name': 'WALinuxAgent',
+ 'x-ms-version': '2012-11-30',
+ }
+
+ def __init__(self, certificate):
+ self.extra_secure_headers = {
+ "x-ms-cipher-name": "DES_EDE3_CBC",
+ "x-ms-guest-agent-public-x509-cert": certificate,
+ }
+
+ def get(self, url, secure=False):
+ headers = self.headers
+ if secure:
+ headers = self.headers.copy()
+ headers.update(self.extra_secure_headers)
+ return util.read_file_or_url(url, headers=headers)
+
+ def post(self, url, data=None, extra_headers=None):
+ headers = self.headers
+ if extra_headers is not None:
+ headers = self.headers.copy()
+ headers.update(extra_headers)
+ return util.read_file_or_url(url, data=data, headers=headers)
+
+
+class GoalState(object):
+
+ def __init__(self, xml, http_client):
+ self.http_client = http_client
+ self.root = ElementTree.fromstring(xml)
+ self._certificates_xml = None
+
+ def _text_from_xpath(self, xpath):
+ element = self.root.find(xpath)
+ if element is not None:
+ return element.text
+ return None
+
+ @property
+ def container_id(self):
+ return self._text_from_xpath('./Container/ContainerId')
+
+ @property
+ def incarnation(self):
+ return self._text_from_xpath('./Incarnation')
+
+ @property
+ def instance_id(self):
+ return self._text_from_xpath(
+ './Container/RoleInstanceList/RoleInstance/InstanceId')
+
+ @property
+ def certificates_xml(self):
+ if self._certificates_xml is None:
+ url = self._text_from_xpath(
+ './Container/RoleInstanceList/RoleInstance'
+ '/Configuration/Certificates')
+ if url is not None:
+ self._certificates_xml = self.http_client.get(
+ url, secure=True).contents
+ return self._certificates_xml
+
+
+class OpenSSLManager(object):
+
+ certificate_names = {
+ 'private_key': 'TransportPrivate.pem',
+ 'certificate': 'TransportCert.pem',
+ }
+
+ def __init__(self):
+ self.tmpdir = tempfile.mkdtemp()
+ self.certificate = None
+ self.generate_certificate()
+
+ def clean_up(self):
+ util.del_dir(self.tmpdir)
+
+ def generate_certificate(self):
+ LOG.debug('Generating certificate for communication with fabric...')
+ if self.certificate is not None:
+ LOG.debug('Certificate already generated.')
+ return
+ with cd(self.tmpdir):
+ util.subp([
+ 'openssl', 'req', '-x509', '-nodes', '-subj',
+ '/CN=LinuxTransport', '-days', '32768', '-newkey', 'rsa:2048',
+ '-keyout', self.certificate_names['private_key'],
+ '-out', self.certificate_names['certificate'],
+ ])
+ certificate = ''
+ for line in open(self.certificate_names['certificate']):
+ if "CERTIFICATE" not in line:
+ certificate += line.rstrip()
+ self.certificate = certificate
+ LOG.debug('New certificate generated.')
+
+ def parse_certificates(self, certificates_xml):
+ tag = ElementTree.fromstring(certificates_xml).find(
+ './/Data')
+ certificates_content = tag.text
+ lines = [
+ b'MIME-Version: 1.0',
+ b'Content-Disposition: attachment; filename="Certificates.p7m"',
+ b'Content-Type: application/x-pkcs7-mime; name="Certificates.p7m"',
+ b'Content-Transfer-Encoding: base64',
+ b'',
+ certificates_content.encode('utf-8'),
+ ]
+ with cd(self.tmpdir):
+ with open('Certificates.p7m', 'wb') as f:
+ f.write(b'\n'.join(lines))
+ out, _ = util.subp(
+ 'openssl cms -decrypt -in Certificates.p7m -inkey'
+ ' {private_key} -recip {certificate} | openssl pkcs12 -nodes'
+ ' -password pass:'.format(**self.certificate_names),
+ shell=True)
+ private_keys, certificates = [], []
+ current = []
+ for line in out.splitlines():
+ current.append(line)
+ if re.match(r'[-]+END .*?KEY[-]+$', line):
+ private_keys.append('\n'.join(current))
+ current = []
+ elif re.match(r'[-]+END .*?CERTIFICATE[-]+$', line):
+ certificates.append('\n'.join(current))
+ current = []
+ keys = []
+ for certificate in certificates:
+ with cd(self.tmpdir):
+ public_key, _ = util.subp(
+ 'openssl x509 -noout -pubkey |'
+ 'ssh-keygen -i -m PKCS8 -f /dev/stdin',
+ data=certificate,
+ shell=True)
+ keys.append(public_key)
+ return keys
+
+
+class WALinuxAgentShim(object):
+
+ REPORT_READY_XML_TEMPLATE = '\n'.join([
+ '<?xml version="1.0" encoding="utf-8"?>',
+ '<Health xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"'
+ ' xmlns:xsd="http://www.w3.org/2001/XMLSchema">',
+ ' <GoalStateIncarnation>{incarnation}</GoalStateIncarnation>',
+ ' <Container>',
+ ' <ContainerId>{container_id}</ContainerId>',
+ ' <RoleInstanceList>',
+ ' <Role>',
+ ' <InstanceId>{instance_id}</InstanceId>',
+ ' <Health>',
+ ' <State>Ready</State>',
+ ' </Health>',
+ ' </Role>',
+ ' </RoleInstanceList>',
+ ' </Container>',
+ '</Health>'])
+
+ def __init__(self):
+ LOG.debug('WALinuxAgentShim instantiated...')
+ self.endpoint = self.find_endpoint()
+ self.openssl_manager = None
+ self.values = {}
+
+ def clean_up(self):
+ if self.openssl_manager is not None:
+ self.openssl_manager.clean_up()
+
+ @staticmethod
+ def get_ip_from_lease_value(lease_value):
+ unescaped_value = lease_value.replace('\\', '')
+ if len(unescaped_value) > 4:
+ hex_string = ''
+ for hex_pair in unescaped_value.split(':'):
+ if len(hex_pair) == 1:
+ hex_pair = '0' + hex_pair
+ hex_string += hex_pair
+ packed_bytes = struct.pack(
+ '>L', int(hex_string.replace(':', ''), 16))
+ else:
+ packed_bytes = unescaped_value.encode('utf-8')
+ return socket.inet_ntoa(packed_bytes)
+
+ @staticmethod
+ def find_endpoint():
+ LOG.debug('Finding Azure endpoint...')
+ content = util.load_file('/var/lib/dhcp/dhclient.eth0.leases')
+ value = None
+ for line in content.splitlines():
+ if 'unknown-245' in line:
+ value = line.strip(' ').split(' ', 2)[-1].strip(';\n"')
+ if value is None:
+ raise Exception('No endpoint found in DHCP config.')
+ endpoint_ip_address = WALinuxAgentShim.get_ip_from_lease_value(value)
+ LOG.debug('Azure endpoint found at %s', endpoint_ip_address)
+ return endpoint_ip_address
+
+ def register_with_azure_and_fetch_data(self):
+ self.openssl_manager = OpenSSLManager()
+ http_client = AzureEndpointHttpClient(self.openssl_manager.certificate)
+ LOG.info('Registering with Azure...')
+ attempts = 0
+ while True:
+ try:
+ response = http_client.get(
+ 'http://{0}/machine/?comp=goalstate'.format(self.endpoint))
+ except Exception:
+ if attempts < 10:
+ time.sleep(attempts + 1)
+ else:
+ raise
+ else:
+ break
+ attempts += 1
+ LOG.debug('Successfully fetched GoalState XML.')
+ goal_state = GoalState(response.contents, http_client)
+ public_keys = []
+ if goal_state.certificates_xml is not None:
+ LOG.debug('Certificate XML found; parsing out public keys.')
+ public_keys = self.openssl_manager.parse_certificates(
+ goal_state.certificates_xml)
+ data = {
+ 'public-keys': public_keys,
+ }
+ self._report_ready(goal_state, http_client)
+ return data
+
+ def _report_ready(self, goal_state, http_client):
+ LOG.debug('Reporting ready to Azure fabric.')
+ document = self.REPORT_READY_XML_TEMPLATE.format(
+ incarnation=goal_state.incarnation,
+ container_id=goal_state.container_id,
+ instance_id=goal_state.instance_id,
+ )
+ http_client.post(
+ "http://{0}/machine?comp=health".format(self.endpoint),
+ data=document,
+ extra_headers={'Content-Type': 'text/xml; charset=utf-8'},
+ )
+ LOG.info('Reported ready to Azure fabric.')
+
+
+def get_metadata_from_fabric():
+ shim = WALinuxAgentShim()
+ try:
+ return shim.register_with_azure_and_fetch_data()
+ finally:
+ shim.clean_up()
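
get_ip_from_lease_value has to cope with dhclient storing option 245 either
as colon-separated hex pairs or as a short raw string. A minimal standalone
sketch of the same decoding (the sample value is illustrative):

    import socket
    import struct

    def ip_from_lease_value(lease_value):
        unescaped = lease_value.replace('\\', '')
        if len(unescaped) > 4:
            # colon-separated hex pairs, possibly with leading zeros dropped
            hex_string = ''.join(
                pair if len(pair) == 2 else '0' + pair
                for pair in unescaped.split(':'))
            packed = struct.pack('>L', int(hex_string, 16))
        else:
            # short values are the raw four bytes of the address
            packed = unescaped.encode('utf-8')
        return socket.inet_ntoa(packed)

    print(ip_from_lease_value('a8:3f:81:10'))  # -> 168.63.129.16
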
diff --git a/cloudinit/sources/helpers/openstack.py b/cloudinit/sources/helpers/openstack.py
index b7e19314..1aa6bbae 100644
--- a/cloudinit/sources/helpers/openstack.py
+++ b/cloudinit/sources/helpers/openstack.py
@@ -24,6 +24,8 @@ import copy
import functools
import os
+import six
+
from cloudinit import ec2_utils
from cloudinit import log as logging
from cloudinit import sources
@@ -49,11 +51,13 @@ OS_LATEST = 'latest'
OS_FOLSOM = '2012-08-10'
OS_GRIZZLY = '2013-04-04'
OS_HAVANA = '2013-10-17'
+OS_LIBERTY = '2015-10-15'
# keep this in chronological order. new supported versions go at the end.
OS_VERSIONS = (
OS_FOLSOM,
OS_GRIZZLY,
OS_HAVANA,
+ OS_LIBERTY,
)
@@ -205,7 +209,7 @@ class BaseReader(object):
"""
load_json_anytype = functools.partial(
- util.load_json, root_types=(dict, basestring, list))
+ util.load_json, root_types=(dict, list) + six.string_types)
def datafiles(version):
files = {}
@@ -227,6 +231,11 @@ class BaseReader(object):
False,
load_json_anytype,
)
+ files['networkdata'] = (
+ self._path_join("openstack", version, 'network_data.json'),
+ False,
+ load_json_anytype,
+ )
return files
results = {
@@ -234,7 +243,7 @@ class BaseReader(object):
'version': 2,
}
data = datafiles(self._find_working_version())
- for (name, (path, required, translator)) in data.iteritems():
+ for (name, (path, required, translator)) in data.items():
path = self._path_join(self.base_path, path)
data = None
found = False
@@ -325,14 +334,14 @@ class ConfigDriveReader(BaseReader):
return os.path.join(*components)
def _path_read(self, path):
- return util.load_file(path)
+ return util.load_file(path, decode=False)
def _fetch_available_versions(self):
if self._versions is None:
path = self._path_join(self.base_path, 'openstack')
found = [d for d in os.listdir(path)
if os.path.isdir(os.path.join(path))]
- self._versions = found
+ self._versions = sorted(found)
return self._versions
def _read_ec2_metadata(self):
@@ -364,7 +373,7 @@ class ConfigDriveReader(BaseReader):
raise NonReadable("%s: no files found" % (self.base_path))
md = {}
- for (name, (key, translator, default)) in FILES_V1.iteritems():
+ for (name, (key, translator, default)) in FILES_V1.items():
if name in found:
path = found[name]
try:
@@ -478,7 +487,7 @@ def convert_vendordata_json(data, recurse=True):
"""
if not data:
return None
- if isinstance(data, (str, unicode, basestring)):
+ if isinstance(data, six.string_types):
return data
if isinstance(data, list):
return copy.deepcopy(data)
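
With OS_LIBERTY registered and network_data.json added to datafiles, a
Liberty-era config drive is probed for four files per version directory. A
minimal sketch of the paths involved; the file names follow the OpenStack
config-drive convention and the layout is illustrative:

    version = '2015-10-15'  # OS_LIBERTY, newly supported above
    for name, fname in [('metadata', 'meta_data.json'),
                        ('userdata', 'user_data'),
                        ('vendordata', 'vendor_data.json'),
                        ('networkdata', 'network_data.json')]:
        print('%s -> openstack/%s/%s' % (name, version, fname))
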
diff --git a/cloudinit/sources/helpers/vmware/__init__.py b/cloudinit/sources/helpers/vmware/__init__.py
new file mode 100644
index 00000000..386225d5
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/__init__.py
@@ -0,0 +1,13 @@
+# vi: ts=4 expandtab
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/vmware/imc/__init__.py b/cloudinit/sources/helpers/vmware/imc/__init__.py
new file mode 100644
index 00000000..386225d5
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/__init__.py
@@ -0,0 +1,13 @@
+# vi: ts=4 expandtab
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
diff --git a/cloudinit/sources/helpers/vmware/imc/boot_proto.py b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
new file mode 100644
index 00000000..faba5887
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/boot_proto.py
@@ -0,0 +1,25 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class BootProtoEnum:
+ """Specifies the NIC Boot Settings."""
+
+ DHCP = 'dhcp'
+ STATIC = 'static'
diff --git a/cloudinit/sources/helpers/vmware/imc/config.py b/cloudinit/sources/helpers/vmware/imc/config.py
new file mode 100644
index 00000000..aebc12a0
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config.py
@@ -0,0 +1,95 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from .nic import Nic
+
+
+class Config:
+ """
+    Stores the contents of the customization
+    specification file.
+ """
+
+ DNS = 'DNS|NAMESERVER|'
+ SUFFIX = 'DNS|SUFFIX|'
+ PASS = 'PASSWORD|-PASS'
+ TIMEZONE = 'DATETIME|TIMEZONE'
+ UTC = 'DATETIME|UTC'
+ HOSTNAME = 'NETWORK|HOSTNAME'
+ DOMAINNAME = 'NETWORK|DOMAINNAME'
+
+ def __init__(self, configFile):
+ self._configFile = configFile
+
+ @property
+ def host_name(self):
+ """Return the hostname."""
+ return self._configFile.get(Config.HOSTNAME, None)
+
+ @property
+ def domain_name(self):
+ """Return the domain name."""
+ return self._configFile.get(Config.DOMAINNAME, None)
+
+ @property
+ def timezone(self):
+ """Return the timezone."""
+ return self._configFile.get(Config.TIMEZONE, None)
+
+ @property
+ def utc(self):
+ """Retrieves whether to set time to UTC or Local."""
+ return self._configFile.get(Config.UTC, None)
+
+ @property
+ def admin_password(self):
+ """Return the root password to be set."""
+ return self._configFile.get(Config.PASS, None)
+
+ @property
+ def name_servers(self):
+ """Return the list of DNS servers."""
+ res = []
+ cnt = self._configFile.get_count_with_prefix(Config.DNS)
+ for i in range(1, cnt + 1):
+ key = Config.DNS + str(i)
+ res.append(self._configFile[key])
+
+ return res
+
+ @property
+ def dns_suffixes(self):
+ """Return the list of DNS Suffixes."""
+ res = []
+ cnt = self._configFile.get_count_with_prefix(Config.SUFFIX)
+ for i in range(1, cnt + 1):
+ key = Config.SUFFIX + str(i)
+ res.append(self._configFile[key])
+
+ return res
+
+ @property
+ def nics(self):
+ """Return the list of associated NICs."""
+ res = []
+ nics = self._configFile['NIC-CONFIG|NICS']
+ for nic in nics.split(','):
+ res.append(Nic(nic, self._configFile))
+
+ return res
diff --git a/cloudinit/sources/helpers/vmware/imc/config_file.py b/cloudinit/sources/helpers/vmware/imc/config_file.py
new file mode 100644
index 00000000..bb9fb7dc
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_file.py
@@ -0,0 +1,129 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+
+try:
+ import configparser
+except ImportError:
+ import ConfigParser as configparser
+
+from .config_source import ConfigSource
+
+logger = logging.getLogger(__name__)
+
+
+class ConfigFile(ConfigSource, dict):
+ """ConfigFile module to load the content from a specified source."""
+
+ def __init__(self, filename):
+ self._loadConfigFile(filename)
+
+ def _insertKey(self, key, val):
+ """
+ Inserts a Key Value pair.
+
+ Keyword arguments:
+ key -- The key to insert
+ val -- The value to insert for the key
+
+ """
+ key = key.strip()
+ val = val.strip()
+
+ if key.startswith('-') or '|-' in key:
+ canLog = False
+ else:
+ canLog = True
+
+ # "sensitive" settings shall not be logged
+ if canLog:
+ logger.debug("ADDED KEY-VAL :: '%s' = '%s'" % (key, val))
+ else:
+ logger.debug("ADDED KEY-VAL :: '%s' = '*****************'" % key)
+
+ self[key] = val
+
+ def _loadConfigFile(self, filename):
+ """
+ Parses properties from the specified config file.
+
+ Any previously available properties will be removed.
+ Sensitive data will not be logged in case the key starts
+ from '-'.
+
+ Keyword arguments:
+ filename - The full path to the config file.
+ """
+ logger.info('Parsing the config file %s.' % filename)
+
+ config = configparser.ConfigParser()
+ config.optionxform = str
+ config.read(filename)
+
+ self.clear()
+
+ for category in config.sections():
+ logger.debug("FOUND CATEGORY = '%s'" % category)
+
+ for (key, value) in config.items(category):
+ self._insertKey(category + '|' + key, value)
+
+ def should_keep_current_value(self, key):
+ """
+ Determines whether a value for a property must be kept.
+
+        If the property is missing, it is treated as if it should not
+        be changed by the engine.
+
+ Keyword arguments:
+ key -- The key to search for.
+ """
+ # helps to distinguish from "empty" value which is used to indicate
+ # "removal"
+ return key not in self
+
+ def should_remove_current_value(self, key):
+ """
+ Determines whether a value for the property must be removed.
+
+        If the specified key is empty, it is treated as if it should be
+ removed by the engine.
+
+ Return true if the value can be removed, false otherwise.
+
+ Keyword arguments:
+ key -- The key to search for.
+ """
+ # helps to distinguish from "missing" value which is used to indicate
+ # "keeping unchanged"
+ if key in self:
+ return not bool(self[key])
+ else:
+ return False
+
+ def get_count_with_prefix(self, prefix):
+ """
+ Return the total count of keys that start with the specified prefix.
+
+ Keyword arguments:
+ prefix -- prefix of the key
+ """
+ return len([key for key in self if key.startswith(prefix)])
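
ConfigFile flattens the INI sections of cust.cfg into 'CATEGORY|key' entries,
which is what the Config properties above index into. A minimal usage sketch
(the file path and contents are illustrative):

    from cloudinit.sources.helpers.vmware.imc.config_file import ConfigFile

    # given /var/run/vmware-imc/cust.cfg containing:
    #   [NETWORK]
    #   HOSTNAME = vm01
    #   DOMAINNAME = example.org
    cf = ConfigFile('/var/run/vmware-imc/cust.cfg')
    print(cf['NETWORK|HOSTNAME'])                             # -> 'vm01'
    print(cf.should_keep_current_value('DATETIME|TIMEZONE'))  # True if absent
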
diff --git a/cloudinit/sources/helpers/vmware/imc/config_namespace.py b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
new file mode 100644
index 00000000..7266b699
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_namespace.py
@@ -0,0 +1,25 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from .config_source import ConfigSource
+
+
+class ConfigNamespace(ConfigSource):
+ """Specifies the Config Namespace."""
+ pass
diff --git a/cloudinit/sources/helpers/vmware/imc/config_nic.py b/cloudinit/sources/helpers/vmware/imc/config_nic.py
new file mode 100644
index 00000000..77098a05
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_nic.py
@@ -0,0 +1,247 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2016 VMware INC.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+import os
+import re
+
+from cloudinit import util
+
+logger = logging.getLogger(__name__)
+
+
+class NicConfigurator:
+ def __init__(self, nics):
+ """
+ Initialize the Nic Configurator
+ @param nics (list) an array of nics to configure
+ """
+ self.nics = nics
+ self.mac2Name = {}
+ self.ipv4PrimaryGateway = None
+ self.ipv6PrimaryGateway = None
+ self.find_devices()
+ self._primaryNic = self.get_primary_nic()
+
+ def get_primary_nic(self):
+ """
+        Retrieve the primary nic if it exists.
+        @return (NicBase): the primary nic if it exists, None otherwise
+ """
+ primary_nics = [nic for nic in self.nics if nic.primary]
+ if not primary_nics:
+ return None
+ elif len(primary_nics) > 1:
+ raise Exception('There can only be one primary nic',
+ [nic.mac for nic in primary_nics])
+ else:
+ return primary_nics[0]
+
+ def find_devices(self):
+ """
+ Create the mac2Name dictionary
+ The mac address(es) are in the lower case
+ """
+ cmd = ['ip', 'addr', 'show']
+ (output, err) = util.subp(cmd)
+ sections = re.split(r'\n\d+: ', '\n' + output)[1:]
+
+ macPat = r'link/ether (([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2}))'
+ for section in sections:
+ match = re.search(macPat, section)
+ if not match: # Only keep info about nics
+ continue
+ mac = match.group(1).lower()
+ name = section.split(':', 1)[0]
+ self.mac2Name[mac] = name
+
+ def gen_one_nic(self, nic):
+ """
+ Return the lines needed to configure a nic
+ @return (str list): the string list to configure the nic
+ @param nic (NicBase): the nic to configure
+ """
+ lines = []
+ name = self.mac2Name.get(nic.mac.lower())
+ if not name:
+ raise ValueError('No known device has MACADDR: %s' % nic.mac)
+
+ if nic.onboot:
+ lines.append('auto %s' % name)
+
+ # Customize IPv4
+ lines.extend(self.gen_ipv4(name, nic))
+
+ # Customize IPv6
+ lines.extend(self.gen_ipv6(name, nic))
+
+ lines.append('')
+
+ return lines
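+
+        # For illustration (all values made up): a primary NIC that resolves
+        # to 'eth0' with a static IPv4 address would produce roughly:
+        #     auto eth0
+        #     iface eth0 inet static
+        #      address 10.20.87.154
+        #      netmask 255.255.252.0
+        #      gateway 10.20.84.253 metric 0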
+
+ def gen_ipv4(self, name, nic):
+ """
+ Return the lines needed to configure the IPv4 setting of a nic
+ @return (str list): the string list to configure the gateways
+ @param name (str): name of the nic
+ @param nic (NicBase): the nic to configure
+ """
+ lines = []
+
+ bootproto = nic.bootProto.lower()
+ if nic.ipv4_mode.lower() == 'disabled':
+ bootproto = 'manual'
+ lines.append('iface %s inet %s' % (name, bootproto))
+
+ if bootproto != 'static':
+ return lines
+
+ # Static Ipv4
+ v4 = nic.staticIpv4
+ if v4.ip:
+ lines.append(' address %s' % v4.ip)
+ if v4.netmask:
+ lines.append(' netmask %s' % v4.netmask)
+
+ # Add the primary gateway
+ if nic.primary and v4.gateways:
+ self.ipv4PrimaryGateway = v4.gateways[0]
+ lines.append(' gateway %s metric 0' % self.ipv4PrimaryGateway)
+ return lines
+
+ # Add routes if there is no primary nic
+ if not self._primaryNic:
+ lines.extend(self.gen_ipv4_route(nic, v4.gateways))
+
+ return lines
+
+ def gen_ipv4_route(self, nic, gateways):
+ """
+        Return the lines needed to configure additional IPv4 routes
+ @return (str list): the string list to configure the gateways
+ @param nic (NicBase): the nic to configure
+ @param gateways (str list): the list of gateways
+ """
+ lines = []
+
+ for gateway in gateways:
+ lines.append(' up route add default gw %s metric 10000' %
+ gateway)
+
+ return lines
+
+ def gen_ipv6(self, name, nic):
+ """
+        Return the lines needed to configure the IPv6 settings of a nic
+ @return (str list): the string list to configure the gateways
+ @param name (str): name of the nic
+ @param nic (NicBase): the nic to configure
+ """
+ lines = []
+
+ if not nic.staticIpv6:
+ return lines
+
+ # Static Ipv6
+ addrs = nic.staticIpv6
+ lines.append('iface %s inet6 static' % name)
+ lines.append(' address %s' % addrs[0].ip)
+ lines.append(' netmask %s' % addrs[0].netmask)
+
+ for addr in addrs[1:]:
+ lines.append(' up ifconfig %s inet6 add %s/%s' % (name, addr.ip,
+ addr.netmask))
+ # Add the primary gateway
+ if nic.primary:
+ for addr in addrs:
+ if addr.gateway:
+ self.ipv6PrimaryGateway = addr.gateway
+ lines.append(' gateway %s' % self.ipv6PrimaryGateway)
+ return lines
+
+ # Add routes if there is no primary nic
+ if not self._primaryNic:
+ lines.extend(self._genIpv6Route(name, nic, addrs))
+
+ return lines
+
+    def _genIpv6Route(self, name, nic, addrs):
+        lines = []
+
+        for addr in addrs:
+            # Skip addresses that do not carry a gateway setting; otherwise
+            # a literal 'gw None' route would be emitted.
+            if not addr.gateway:
+                continue
+            lines.append(' up route -A inet6 add default gw '
+                         '%s metric 10000' % addr.gateway)
+
+        return lines
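+
+        # For illustration (addresses made up): a non-primary NIC with two
+        # static IPv6 addresses, the first carrying gateway fc00::1, would
+        # (with no primary NIC present) produce roughly:
+        #     iface eth0 inet6 static
+        #      address fc00::2
+        #      netmask 64
+        #      up ifconfig eth0 inet6 add fc00::3/64
+        #      up route -A inet6 add default gw fc00::1 metric 10000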
+
+ def generate(self):
+ """Return the lines that is needed to configure the nics"""
+ lines = []
+ lines.append('iface lo inet loopback')
+ lines.append('auto lo')
+ lines.append('')
+
+ for nic in self.nics:
+ lines.extend(self.gen_one_nic(nic))
+
+ return lines
+
+    def clear_dhcp(self):
+        logger.info('Clearing DHCP leases')
+
+        # Ignore the return code 1.
+        util.subp(["pkill", "dhclient"], rcs=[0, 1])
+        # The lease files are matched by a shell glob, so this must run
+        # through a shell; a plain exec of rm would treat '*' literally.
+        util.subp("rm -f /var/lib/dhcp/*", shell=True)
+
+ def if_down_up(self):
+ names = []
+ for nic in self.nics:
+ name = self.mac2Name.get(nic.mac.lower())
+ names.append(name)
+
+        for name in names:
+            logger.info('Bringing down interface %s', name)
+            util.subp(["ifdown", name])
+
+        self.clear_dhcp()
+
+        for name in names:
+            logger.info('Bringing up interface %s', name)
+            util.subp(["ifup", name])
+
+    def configure(self):
+        """
+        Configure the /etc/network/interfaces file.
+        Make a backup of the original.
+        """
+ containingDir = '/etc/network'
+
+ interfaceFile = os.path.join(containingDir, 'interfaces')
+ originalFile = os.path.join(containingDir,
+ 'interfaces.before_vmware_customization')
+
+ if not os.path.exists(originalFile) and os.path.exists(interfaceFile):
+ os.rename(interfaceFile, originalFile)
+
+ lines = self.generate()
+ with open(interfaceFile, 'w') as fp:
+ for line in lines:
+ fp.write('%s\n' % line)
+
+ self.if_down_up()
diff --git a/cloudinit/sources/helpers/vmware/imc/config_source.py b/cloudinit/sources/helpers/vmware/imc/config_source.py
new file mode 100644
index 00000000..a367e476
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/config_source.py
@@ -0,0 +1,23 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class ConfigSource:
+ """Specifies a source for the Config Content."""
+ pass
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_error.py b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
new file mode 100644
index 00000000..1b04161f
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_error.py
@@ -0,0 +1,24 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class GuestCustErrorEnum:
+ """Specifies different errors of Guest Customization engine"""
+
+ GUESTCUST_ERROR_SUCCESS = 0
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_event.py b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
new file mode 100644
index 00000000..fc22568f
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_event.py
@@ -0,0 +1,27 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class GuestCustEventEnum:
+ """Specifies different types of Guest Customization Events"""
+
+ GUESTCUST_EVENT_CUSTOMIZE_FAILED = 100
+ GUESTCUST_EVENT_NETWORK_SETUP_FAILED = 101
+ GUESTCUST_EVENT_ENABLE_NICS = 103
+ GUESTCUST_EVENT_QUERY_NICS = 104
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_state.py b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
new file mode 100644
index 00000000..f255be5f
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_state.py
@@ -0,0 +1,25 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class GuestCustStateEnum:
+ """Specifies different states of Guest Customization engine"""
+
+ GUESTCUST_STATE_RUNNING = 4
+ GUESTCUST_STATE_DONE = 5
diff --git a/cloudinit/sources/helpers/vmware/imc/guestcust_util.py b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
new file mode 100644
index 00000000..d39f0a65
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/guestcust_util.py
@@ -0,0 +1,128 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2016 Canonical Ltd.
+# Copyright (C) 2016 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import logging
+import os
+import time
+
+from cloudinit import util
+
+from .guestcust_state import GuestCustStateEnum
+from .guestcust_event import GuestCustEventEnum
+
+logger = logging.getLogger(__name__)
+
+
+CLOUDINIT_LOG_FILE = "/var/log/cloud-init.log"
+QUERY_NICS_SUPPORTED = "queryNicsSupported"
+NICS_STATUS_CONNECTED = "connected"
+
+
+# This will send an RPC command to the underlying
+# VMware Virtualization Platform.
+def send_rpc(rpc):
+ if not rpc:
+ return None
+
+ out = ""
+ err = "Error sending the RPC command"
+
+ try:
+ logger.debug("Sending RPC command: %s", rpc)
+ (out, err) = util.subp(["vmware-rpctool", rpc], rcs=[0])
+ # Remove the trailing newline in the output.
+ if out:
+ out = out.rstrip()
+ except Exception as e:
+ logger.debug("Failed to send RPC command")
+ logger.exception(e)
+
+ return (out, err)
+
+
+# This will send the customization status to the
+# underlying VMware Virtualization Platform.
+def set_customization_status(custstate, custerror, errormessage=None):
+ message = ""
+
+ if errormessage:
+ message = CLOUDINIT_LOG_FILE + "@" + errormessage
+ else:
+ message = CLOUDINIT_LOG_FILE
+
+ rpc = "deployPkg.update.state %d %d %s" % (custstate, custerror, message)
+ (out, err) = send_rpc(rpc)
+ return (out, err)
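+
+    # For illustration: with GUESTCUST_STATE_RUNNING (4),
+    # GUESTCUST_EVENT_ENABLE_NICS (103) and no error message, the RPC
+    # string sent to the platform is:
+    #     deployPkg.update.state 4 103 /var/log/cloud-init.log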
+
+
+# This will read the file nics.txt in the specified directory
+# and return the content
+def get_nics_to_enable(dirpath):
+ if not dirpath:
+ return None
+
+ NICS_SIZE = 1024
+ nicsfilepath = os.path.join(dirpath, "nics.txt")
+ if not os.path.exists(nicsfilepath):
+ return None
+
+ with open(nicsfilepath, 'r') as fp:
+ nics = fp.read(NICS_SIZE)
+
+ return nics
+
+
+# This will send an RPC command to the underlying VMware Virtualization
+# platform and enable nics.
+def enable_nics(nics):
+    if not nics:
+        logger.warning("No NICs found")
+        return
+
+ enableNicsWaitRetries = 5
+ enableNicsWaitCount = 5
+ enableNicsWaitSeconds = 1
+
+ for attempt in range(0, enableNicsWaitRetries):
+ logger.debug("Trying to connect interfaces, attempt %d", attempt)
+ (out, err) = set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustEventEnum.GUESTCUST_EVENT_ENABLE_NICS,
+ nics)
+ if not out:
+ time.sleep(enableNicsWaitCount * enableNicsWaitSeconds)
+ continue
+
+ if out != QUERY_NICS_SUPPORTED:
+ logger.warning("NICS connection status query is not supported")
+ return
+
+ for count in range(0, enableNicsWaitCount):
+ (out, err) = set_customization_status(
+ GuestCustStateEnum.GUESTCUST_STATE_RUNNING,
+ GuestCustEventEnum.GUESTCUST_EVENT_QUERY_NICS,
+ nics)
+ if out and out == NICS_STATUS_CONNECTED:
+ logger.info("NICS are connected on %d second", count)
+ return
+
+ time.sleep(enableNicsWaitSeconds)
+
+ logger.warning("Can't connect network interfaces after %d attempts",
+ enableNicsWaitRetries)
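+
+
+# A short sketch of the handshake that enable_nics drives: it sends
+# GUESTCUST_EVENT_ENABLE_NICS with the nics payload, expects the platform to
+# answer "queryNicsSupported", then polls with GUESTCUST_EVENT_QUERY_NICS
+# until the platform reports "connected" or the retry budget is exhausted.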
diff --git a/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
new file mode 100644
index 00000000..33f88726
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/ipv4_mode.py
@@ -0,0 +1,45 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class Ipv4ModeEnum:
+ """
+ The IPv4 configuration mode which directly represents the user's goal.
+
+    This mode effectively acts as a contract with the in-guest customization
+    engine. It must be set based on what the user has requested and should
+    not be changed by the intermediate layers. It's up to the in-guest
+    engine to interpret and materialize the user's request.
+ """
+
+ # The legacy mode which only allows dhcp/static based on whether IPv4
+ # addresses list is empty or not
+ IPV4_MODE_BACKWARDS_COMPATIBLE = 'BACKWARDS_COMPATIBLE'
+
+ # IPv4 must use static address. Reserved for future use
+ IPV4_MODE_STATIC = 'STATIC'
+
+ # IPv4 must use DHCPv4. Reserved for future use
+ IPV4_MODE_DHCP = 'DHCP'
+
+ # IPv4 must be disabled
+ IPV4_MODE_DISABLED = 'DISABLED'
+
+ # IPv4 settings should be left untouched. Reserved for future use
+ IPV4_MODE_AS_IS = 'AS_IS'
diff --git a/cloudinit/sources/helpers/vmware/imc/nic.py b/cloudinit/sources/helpers/vmware/imc/nic.py
new file mode 100644
index 00000000..b5d704ea
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/nic.py
@@ -0,0 +1,147 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+from .boot_proto import BootProtoEnum
+from .nic_base import NicBase, StaticIpv4Base, StaticIpv6Base
+
+
+class Nic(NicBase):
+ """
+ Holds the information about each NIC specified
+ in the customization specification file
+ """
+
+ def __init__(self, name, configFile):
+ self._name = name
+ self._configFile = configFile
+
+ def _get(self, what):
+ return self._configFile.get(self.name + '|' + what, None)
+
+ def _get_count_with_prefix(self, prefix):
+ return self._configFile.get_count_with_prefix(self.name + prefix)
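+
+    # For illustration (NIC name and keys as they appear in a flattened
+    # spec): for a NIC named 'NIC1', self._get('MACADDR') looks up the key
+    # 'NIC1|MACADDR', and self._get_count_with_prefix('|IPv6ADDR|') counts
+    # keys such as 'NIC1|IPv6ADDR|1'.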
+
+ @property
+ def name(self):
+ return self._name
+
+ @property
+ def mac(self):
+ return self._get('MACADDR').lower()
+
+ @property
+ def primary(self):
+ value = self._get('PRIMARY')
+ if value:
+ value = value.lower()
+ return value == 'yes' or value == 'true'
+ else:
+ return False
+
+ @property
+ def onboot(self):
+ value = self._get('ONBOOT')
+ if value:
+ value = value.lower()
+ return value == 'yes' or value == 'true'
+ else:
+ return False
+
+ @property
+ def bootProto(self):
+ value = self._get('BOOTPROTO')
+ if value:
+ return value.lower()
+ else:
+ return ""
+
+ @property
+ def ipv4_mode(self):
+ value = self._get('IPv4_MODE')
+ if value:
+ return value.lower()
+ else:
+ return ""
+
+ @property
+    def staticIpv4(self):
+        """
+        Checks the BOOTPROTO property and returns a list with a
+        StaticIpv4Addr configuration object if the STATIC configuration
+        is set, None otherwise.
+        """
+ if self.bootProto == BootProtoEnum.STATIC:
+ return [StaticIpv4Addr(self)]
+ else:
+ return None
+
+ @property
+ def staticIpv6(self):
+ cnt = self._get_count_with_prefix('|IPv6ADDR|')
+
+ if not cnt:
+ return None
+
+ result = []
+ for index in range(1, cnt + 1):
+ result.append(StaticIpv6Addr(self, index))
+
+ return result
+
+
+class StaticIpv4Addr(StaticIpv4Base):
+ """Static IPV4 Setting."""
+
+ def __init__(self, nic):
+ self._nic = nic
+
+ @property
+ def ip(self):
+ return self._nic._get('IPADDR')
+
+ @property
+ def netmask(self):
+ return self._nic._get('NETMASK')
+
+ @property
+ def gateways(self):
+ value = self._nic._get('GATEWAY')
+ if value:
+ return [x.strip() for x in value.split(',')]
+ else:
+ return None
+
+
+class StaticIpv6Addr(StaticIpv6Base):
+ """Static IPV6 Address."""
+
+ def __init__(self, nic, index):
+ self._nic = nic
+ self._index = index
+
+ @property
+ def ip(self):
+ return self._nic._get('IPv6ADDR|' + str(self._index))
+
+ @property
+ def netmask(self):
+ return self._nic._get('IPv6NETMASK|' + str(self._index))
+
+ @property
+ def gateway(self):
+ return self._nic._get('IPv6GATEWAY|' + str(self._index))
diff --git a/cloudinit/sources/helpers/vmware/imc/nic_base.py b/cloudinit/sources/helpers/vmware/imc/nic_base.py
new file mode 100644
index 00000000..030ba311
--- /dev/null
+++ b/cloudinit/sources/helpers/vmware/imc/nic_base.py
@@ -0,0 +1,154 @@
+# vi: ts=4 expandtab
+#
+# Copyright (C) 2015 Canonical Ltd.
+# Copyright (C) 2015 VMware Inc.
+#
+# Author: Sankar Tanguturi <stanguturi@vmware.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License version 3, as
+# published by the Free Software Foundation.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+
+class NicBase:
+ """
+ Define what are expected of each nic.
+ The following properties should be provided in an implementation class.
+ """
+
+ @property
+ def mac(self):
+ """
+ Retrieves the mac address of the nic
+ @return (str) : the MACADDR setting
+ """
+ raise NotImplementedError('MACADDR')
+
+ @property
+ def primary(self):
+ """
+        Retrieves whether the nic is the primary nic.
+        Indicates whether this NIC will be used to define the default
+        gateway. If none of the NICs is configured to be primary, the
+        default gateway won't be set.
+        @return (bool): the PRIMARY setting
+ """
+ raise NotImplementedError('PRIMARY')
+
+ @property
+ def onboot(self):
+ """
+        Retrieves whether the nic should be up at boot time
+ @return (bool) : the ONBOOT setting
+ """
+ raise NotImplementedError('ONBOOT')
+
+ @property
+ def bootProto(self):
+ """
+ Retrieves the boot protocol of the nic
+ @return (str): the BOOTPROTO setting, valid values: dhcp and static.
+ """
+ raise NotImplementedError('BOOTPROTO')
+
+ @property
+ def ipv4_mode(self):
+ """
+ Retrieves the IPv4_MODE
+ @return (str): the IPv4_MODE setting, valid values:
+ backwards_compatible, static, dhcp, disabled, as_is
+ """
+ raise NotImplementedError('IPv4_MODE')
+
+ @property
+ def staticIpv4(self):
+ """
+ Retrieves the static IPv4 configuration of the nic
+ @return (StaticIpv4Base list): the static ipv4 setting
+ """
+ raise NotImplementedError('Static IPv4')
+
+ @property
+ def staticIpv6(self):
+ """
+ Retrieves the IPv6 configuration of the nic
+ @return (StaticIpv6Base list): the static ipv6 setting
+ """
+ raise NotImplementedError('Static Ipv6')
+
+ def validate(self):
+ """
+ Validate the object
+ For example, the staticIpv4 property is required and should not be
+ empty when ipv4Mode is STATIC
+ """
+ raise NotImplementedError('Check constraints on properties')
+
+
+class StaticIpv4Base:
+ """
+    Defines what is expected of a static IPv4 setting.
+ The following properties should be provided in an implementation class.
+ """
+
+ @property
+ def ip(self):
+ """
+ Retrieves the Ipv4 address
+ @return (str): the IPADDR setting
+ """
+ raise NotImplementedError('Ipv4 Address')
+
+ @property
+ def netmask(self):
+ """
+ Retrieves the Ipv4 NETMASK setting
+ @return (str): the NETMASK setting
+ """
+ raise NotImplementedError('Ipv4 NETMASK')
+
+ @property
+ def gateways(self):
+ """
+ Retrieves the gateways on this Ipv4 subnet
+ @return (str list): the GATEWAY setting
+ """
+ raise NotImplementedError('Ipv4 GATEWAY')
+
+
+class StaticIpv6Base:
+ """Define what are expected of a static IPv6 setting
+ The following properties should be provided in an implementation class.
+ """
+
+ @property
+ def ip(self):
+ """
+ Retrieves the Ipv6 address
+ @return (str): the IPv6ADDR setting
+ """
+ raise NotImplementedError('Ipv6 Address')
+
+ @property
+ def netmask(self):
+ """
+ Retrieves the Ipv6 NETMASK setting
+ @return (str): the IPv6NETMASK setting
+ """
+ raise NotImplementedError('Ipv6 NETMASK')
+
+ @property
+ def gateway(self):
+ """
+ Retrieves the Ipv6 GATEWAY setting
+ @return (str): the IPv6GATEWAY setting
+ """
+ raise NotImplementedError('Ipv6 GATEWAY')